eventdev: fix inconsistency in queue config
[dpdk.git] test/test/test_eventdev_sw.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <string.h>
36 #include <stdint.h>
37 #include <errno.h>
38 #include <unistd.h>
39 #include <sys/queue.h>
40
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
44 #include <rte_eal.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
50 #include <rte_eventdev.h>
51 #include <rte_pause.h>
52
53 #include "test.h"
54
55 #define MAX_PORTS 16
56 #define MAX_QIDS 16
57 #define NUM_PACKETS (1<<18)
58
59 static int evdev;
60
61 struct test {
62         struct rte_mempool *mbuf_pool;
63         uint8_t port[MAX_PORTS];
64         uint8_t qid[MAX_QIDS];
65         int nb_qids;
66 };
67
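/*
 * Bare release event used by tests that enqueue an RTE_EVENT_OP_RELEASE
 * with no matching NEW event (e.g. abuse_inflights).  It is assumed to be
 * initialised (op = RTE_EVENT_OP_RELEASE) by the test setup code later in
 * this file before any test that enqueues it runs.
 */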
68 static struct rte_event release_ev;
69
70 static inline struct rte_mbuf *
71 rte_gen_arp(int portid, struct rte_mempool *mp)
72 {
73         /*
74          * len = 14 + 46
75          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
76          */
77         static const uint8_t arp_request[] = {
78                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
79                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
80                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
81                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
82                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
83                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
85                 0x00, 0x00, 0x00, 0x00
86         };
87         struct rte_mbuf *m;
88         int pkt_len = sizeof(arp_request) - 1;
89
90         m = rte_pktmbuf_alloc(mp);
91         if (!m)
92                 return 0;
93
94         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
95                 arp_request, pkt_len);
96         rte_pktmbuf_pkt_len(m) = pkt_len;
97         rte_pktmbuf_data_len(m) = pkt_len;
98
99         RTE_SET_USED(portid);
100
101         return m;
102 }
103
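/*
 * Debug helper: print the device-level xstats plus the xstats of port 1
 * and queue 1 to stdout.  Values are only displayed, never validated, so
 * this can be called from any test for diagnostics.
 */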
104 static void
105 xstats_print(void)
106 {
107         const uint32_t XSTATS_MAX = 1024;
108         uint32_t i;
109         uint32_t ids[XSTATS_MAX];
110         uint64_t values[XSTATS_MAX];
111         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
112
113         for (i = 0; i < XSTATS_MAX; i++)
114                 ids[i] = i;
115
116         /* Device names / values */
117         int ret = rte_event_dev_xstats_names_get(evdev,
118                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
119                                         xstats_names, ids, XSTATS_MAX);
120         if (ret < 0) {
121                 printf("%d: xstats names get() returned error\n",
122                         __LINE__);
123                 return;
124         }
125         ret = rte_event_dev_xstats_get(evdev,
126                                         RTE_EVENT_DEV_XSTATS_DEVICE,
127                                         0, ids, values, ret);
128         if (ret > (signed int)XSTATS_MAX)
129                 printf("%s %d: more xstats available than space\n",
130                                 __func__, __LINE__);
131         for (i = 0; (signed int)i < ret; i++) {
132                 printf("%d : %s : %"PRIu64"\n",
133                                 i, xstats_names[i].name, values[i]);
134         }
135
136         /* Port names / values */
137         ret = rte_event_dev_xstats_names_get(evdev,
138                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
139                                         xstats_names, ids, XSTATS_MAX);
140         ret = rte_event_dev_xstats_get(evdev,
141                                         RTE_EVENT_DEV_XSTATS_PORT, 1,
142                                         ids, values, ret);
143         if (ret > (signed int)XSTATS_MAX)
144                 printf("%s %d: more xstats available than space\n",
145                                 __func__, __LINE__);
146         for (i = 0; (signed int)i < ret; i++) {
147                 printf("%d : %s : %"PRIu64"\n",
148                                 i, xstats_names[i].name, values[i]);
149         }
150
151         /* Queue names / values */
152         ret = rte_event_dev_xstats_names_get(evdev,
153                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
154                                         xstats_names, ids, XSTATS_MAX);
155         ret = rte_event_dev_xstats_get(evdev,
156                                         RTE_EVENT_DEV_XSTATS_QUEUE,
157                                         1, ids, values, ret);
158         if (ret > (signed int)XSTATS_MAX)
159                 printf("%s %d: more xstats available than space\n",
160                                 __func__, __LINE__);
161         for (i = 0; (signed int)i < ret; i++) {
162                 printf("%d : %s : %"PRIu64"\n",
163                                 i, xstats_names[i].name, values[i]);
164         }
165 }
166
167 /* initialization and config */
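/*
 * (Re)configure the device under test with the requested queue and port
 * counts; the remaining rte_event_dev_config fields are fixed test
 * defaults.  The mbuf pool pointer is preserved across the memset of the
 * test structure so repeated init() calls reuse the same pool.
 */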
168 static inline int
169 init(struct test *t, int nb_queues, int nb_ports)
170 {
171         struct rte_event_dev_config config = {
172                         .nb_event_queues = nb_queues,
173                         .nb_event_ports = nb_ports,
174                         .nb_event_queue_flows = 1024,
175                         .nb_events_limit = 4096,
176                         .nb_event_port_dequeue_depth = 128,
177                         .nb_event_port_enqueue_depth = 128,
178         };
179         int ret;
180
181         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
182
183         memset(t, 0, sizeof(*t));
184         t->mbuf_pool = temp;
185
186         ret = rte_event_dev_configure(evdev, &config);
187         if (ret < 0)
188                 printf("%d: Error configuring device\n", __LINE__);
189         return ret;
190 }
191
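/*
 * Set up num_ports ports with a fixed port configuration (new-event
 * threshold 1024, dequeue depth 32, enqueue depth 64) and record the
 * port ids in t->port[].  Fails if num_ports exceeds MAX_PORTS.
 */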
192 static inline int
193 create_ports(struct test *t, int num_ports)
194 {
195         int i;
196         static const struct rte_event_port_conf conf = {
197                         .new_event_threshold = 1024,
198                         .dequeue_depth = 32,
199                         .enqueue_depth = 64,
200         };
201         if (num_ports > MAX_PORTS)
202                 return -1;
203
204         for (i = 0; i < num_ports; i++) {
205                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
206                         printf("Error setting up port %d\n", i);
207                         return -1;
208                 }
209                 t->port[i] = i;
210         }
211
212         return 0;
213 }
214
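/*
 * Create num_qids load-balanced queues of the given schedule type
 * (atomic, ordered or parallel) and append their ids to t->qid[].  Note
 * the MAX_QIDS bound is only checked after the queues have been set up.
 */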
215 static inline int
216 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
217 {
218         int i;
219
220         /* Q creation */
221         const struct rte_event_queue_conf conf = {
222                         .schedule_type = flags,
223                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
224                         .nb_atomic_flows = 1024,
225                         .nb_atomic_order_sequences = 1024,
226         };
227
228         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
229                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
230                         printf("%d: error creating qid %d\n", __LINE__, i);
231                         return -1;
232                 }
233                 t->qid[i] = i;
234         }
235         t->nb_qids += num_qids;
236         if (t->nb_qids > MAX_QIDS)
237                 return -1;
238
239         return 0;
240 }
241
242 static inline int
243 create_atomic_qids(struct test *t, int num_qids)
244 {
245         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
246 }
247
248 static inline int
249 create_ordered_qids(struct test *t, int num_qids)
250 {
251         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
252 }
253
254
255 static inline int
256 create_unordered_qids(struct test *t, int num_qids)
257 {
258         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
259 }
260
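/*
 * Create num_qids single-link (directed) queues and link each one to the
 * corresponding entry of ports[], so queue i is serviced only by that
 * port.
 */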
261 static inline int
262 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
263 {
264         int i;
265
266         /* Q creation */
267         static const struct rte_event_queue_conf conf = {
268                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
269                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
270         };
271
272         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
273                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
274                         printf("%d: error creating qid %d\n", __LINE__, i);
275                         return -1;
276                 }
277                 t->qid[i] = i;
278
279                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
280                                 &t->qid[i], NULL, 1) != 1) {
281                         printf("%d: error creating link for qid %d\n",
282                                         __LINE__, i);
283                         return -1;
284                 }
285         }
286         t->nb_qids += num_qids;
287         if (t->nb_qids > MAX_QIDS)
288                 return -1;
289
290         return 0;
291 }
292
293 /* destruction */
294 static inline int
295 cleanup(struct test *t __rte_unused)
296 {
297         rte_event_dev_stop(evdev);
298         rte_event_dev_close(evdev);
299         return 0;
300 }
301
302 struct test_event_dev_stats {
303         uint64_t rx_pkts;       /**< Total packets received */
304         uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
305         uint64_t tx_pkts;       /**< Total packets transmitted */
306
307         /** Packets received on this port */
308         uint64_t port_rx_pkts[MAX_PORTS];
309         /** Packets dropped on this port */
310         uint64_t port_rx_dropped[MAX_PORTS];
311         /** Packets inflight on this port */
312         uint64_t port_inflight[MAX_PORTS];
313         /** Packets transmitted on this port */
314         uint64_t port_tx_pkts[MAX_PORTS];
315         /** Packets received on this qid */
316         uint64_t qid_rx_pkts[MAX_QIDS];
317         /** Packets dropped on this qid */
318         uint64_t qid_rx_dropped[MAX_QIDS];
319         /** Packets transmitted on this qid */
320         uint64_t qid_tx_pkts[MAX_QIDS];
321 };
322
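/*
 * Snapshot the xstats of the device under test into a
 * test_event_dev_stats structure by looking them up by name ("dev_rx",
 * "port_N_rx", "qid_N_tx", ...).  These names are the ones exposed by
 * the sw eventdev PMD; other PMDs may not provide them.  Typical usage
 * in the tests below:
 *
 *     struct test_event_dev_stats stats;
 *     int err = test_event_dev_stats_get(evdev, &stats);
 */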
323 static inline int
324 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
325 {
326         static uint32_t i;
327         static uint32_t total_ids[3]; /* rx, drop and tx */
328         static uint32_t port_rx_pkts_ids[MAX_PORTS];
329         static uint32_t port_rx_dropped_ids[MAX_PORTS];
330         static uint32_t port_inflight_ids[MAX_PORTS];
331         static uint32_t port_tx_pkts_ids[MAX_PORTS];
332         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
333         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
334         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
335
336
337         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
338                         "dev_rx", &total_ids[0]);
339         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
340                         "dev_drop", &total_ids[1]);
341         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
342                         "dev_tx", &total_ids[2]);
343         for (i = 0; i < MAX_PORTS; i++) {
344                 char name[32];
345                 snprintf(name, sizeof(name), "port_%u_rx", i);
346                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
347                                 dev_id, name, &port_rx_pkts_ids[i]);
348                 snprintf(name, sizeof(name), "port_%u_drop", i);
349                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
350                                 dev_id, name, &port_rx_dropped_ids[i]);
351                 snprintf(name, sizeof(name), "port_%u_inflight", i);
352                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
353                                 dev_id, name, &port_inflight_ids[i]);
354                 snprintf(name, sizeof(name), "port_%u_tx", i);
355                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
356                                 dev_id, name, &port_tx_pkts_ids[i]);
357         }
358         for (i = 0; i < MAX_QIDS; i++) {
359                 char name[32];
360                 snprintf(name, sizeof(name), "qid_%u_rx", i);
361                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
362                                 dev_id, name, &qid_rx_pkts_ids[i]);
363                 snprintf(name, sizeof(name), "qid_%u_drop", i);
364                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
365                                 dev_id, name, &qid_rx_dropped_ids[i]);
366                 snprintf(name, sizeof(name), "qid_%u_tx", i);
367                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
368                                 dev_id, name, &qid_tx_pkts_ids[i]);
369         }
370
371         return 0;
372 }
373
374 /* run_prio_packet_test
375  * This performs a basic packet priority check on the test instance passed in.
376  * It is factored out of the main priority tests as the same tests must be
377  * performed to ensure prioritization of each type of QID.
378  *
379  * Requirements:
380  *  - An initialized test structure, including mempool
381  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
382  *  - t->qid[0] is the QID to be tested
383  *  - if LB QID, the CQ must be mapped to the QID.
384  */
385 static int
386 run_prio_packet_test(struct test *t)
387 {
388         int err;
389         const uint32_t MAGIC_SEQN[] = {4711, 1234};
390         const uint32_t PRIORITY[] = {
391                 RTE_EVENT_DEV_PRIORITY_NORMAL,
392                 RTE_EVENT_DEV_PRIORITY_HIGHEST
393         };
394         unsigned int i;
395         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
396                 /* generate pkt and enqueue */
397                 struct rte_event ev;
398                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
399                 if (!arp) {
400                         printf("%d: gen of pkt failed\n", __LINE__);
401                         return -1;
402                 }
403                 arp->seqn = MAGIC_SEQN[i];
404
405                 ev = (struct rte_event){
406                         .priority = PRIORITY[i],
407                         .op = RTE_EVENT_OP_NEW,
408                         .queue_id = t->qid[0],
409                         .mbuf = arp
410                 };
411                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
412                 if (err < 0) {
413                         printf("%d: error failed to enqueue\n", __LINE__);
414                         return -1;
415                 }
416         }
417
418         rte_event_schedule(evdev);
419
420         struct test_event_dev_stats stats;
421         err = test_event_dev_stats_get(evdev, &stats);
422         if (err) {
423                 printf("%d: error failed to get stats\n", __LINE__);
424                 return -1;
425         }
426
427         if (stats.port_rx_pkts[t->port[0]] != 2) {
428                 printf("%d: error stats incorrect for directed port\n",
429                                 __LINE__);
430                 rte_event_dev_dump(evdev, stdout);
431                 return -1;
432         }
433
434         struct rte_event ev, ev2;
435         uint32_t deq_pkts;
436         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
437         if (deq_pkts != 1) {
438                 printf("%d: error failed to deq\n", __LINE__);
439                 rte_event_dev_dump(evdev, stdout);
440                 return -1;
441         }
442         if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
443                 printf("%d: first packet out not highest priority\n",
444                                 __LINE__);
445                 rte_event_dev_dump(evdev, stdout);
446                 return -1;
447         }
448         rte_pktmbuf_free(ev.mbuf);
449
450         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
451         if (deq_pkts != 1) {
452                 printf("%d: error failed to deq\n", __LINE__);
453                 rte_event_dev_dump(evdev, stdout);
454                 return -1;
455         }
456         if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
457                 printf("%d: second packet out not lower priority\n",
458                                 __LINE__);
459                 rte_event_dev_dump(evdev, stdout);
460                 return -1;
461         }
462         rte_pktmbuf_free(ev2.mbuf);
463
464         cleanup(t);
465         return 0;
466 }
467
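/*
 * Enqueue a single event through port 0 to directed queue 2 and verify
 * it is delivered unmodified (same seqn) to port 2, the only port linked
 * to that queue.
 */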
468 static int
469 test_single_directed_packet(struct test *t)
470 {
471         const int rx_enq = 0;
472         const int wrk_enq = 2;
473         int err;
474
475         /* Create instance with 3 directed QIDs going to 3 ports */
476         if (init(t, 3, 3) < 0 ||
477                         create_ports(t, 3) < 0 ||
478                         create_directed_qids(t, 3, t->port) < 0)
479                 return -1;
480
481         if (rte_event_dev_start(evdev) < 0) {
482                 printf("%d: Error with start call\n", __LINE__);
483                 return -1;
484         }
485
486         /************** FORWARD ****************/
487         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
488         struct rte_event ev = {
489                         .op = RTE_EVENT_OP_NEW,
490                         .queue_id = wrk_enq,
491                         .mbuf = arp,
492         };
493
494         if (!arp) {
495                 printf("%d: gen of pkt failed\n", __LINE__);
496                 return -1;
497         }
498
499         const uint32_t MAGIC_SEQN = 4711;
500         arp->seqn = MAGIC_SEQN;
501
502         /* generate pkt and enqueue */
503         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
504         if (err < 0) {
505                 printf("%d: error failed to enqueue\n", __LINE__);
506                 return -1;
507         }
508
509         /* Run schedule() as dir packets may need to be re-ordered */
510         rte_event_schedule(evdev);
511
512         struct test_event_dev_stats stats;
513         err = test_event_dev_stats_get(evdev, &stats);
514         if (err) {
515                 printf("%d: error failed to get stats\n", __LINE__);
516                 return -1;
517         }
518
519         if (stats.port_rx_pkts[rx_enq] != 1) {
520                 printf("%d: error stats incorrect for directed port\n",
521                                 __LINE__);
522                 return -1;
523         }
524
525         uint32_t deq_pkts;
526         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
527         if (deq_pkts != 1) {
528                 printf("%d: error failed to deq\n", __LINE__);
529                 return -1;
530         }
531
532         err = test_event_dev_stats_get(evdev, &stats);
533         if (stats.port_rx_pkts[wrk_enq] != 0 &&
534                         stats.port_rx_pkts[wrk_enq] != 1) {
535                 printf("%d: error directed stats post-dequeue\n", __LINE__);
536                 return -1;
537         }
538
539         if (ev.mbuf->seqn != MAGIC_SEQN) {
540                 printf("%d: error magic sequence number not dequeued\n",
541                                 __LINE__);
542                 return -1;
543         }
544
545         rte_pktmbuf_free(ev.mbuf);
546         cleanup(t);
547         return 0;
548 }
549
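/*
 * Loop one event through a directed queue 1000 times using
 * RTE_EVENT_OP_FORWARD after the initial NEW enqueue, exercising the
 * credit handling for forwarded events on a directed port.
 */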
550 static int
551 test_directed_forward_credits(struct test *t)
552 {
553         uint32_t i;
554         int32_t err;
555
556         if (init(t, 1, 1) < 0 ||
557                         create_ports(t, 1) < 0 ||
558                         create_directed_qids(t, 1, t->port) < 0)
559                 return -1;
560
561         if (rte_event_dev_start(evdev) < 0) {
562                 printf("%d: Error with start call\n", __LINE__);
563                 return -1;
564         }
565
566         struct rte_event ev = {
567                         .op = RTE_EVENT_OP_NEW,
568                         .queue_id = 0,
569         };
570
571         for (i = 0; i < 1000; i++) {
572                 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
573                 if (err < 0) {
574                         printf("%d: error failed to enqueue\n", __LINE__);
575                         return -1;
576                 }
577                 rte_event_schedule(evdev);
578
579                 uint32_t deq_pkts;
580                 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
581                 if (deq_pkts != 1) {
582                         printf("%d: error failed to deq\n", __LINE__);
583                         return -1;
584                 }
585
586                 /* re-write event to be a forward, and continue looping it */
587                 ev.op = RTE_EVENT_OP_FORWARD;
588         }
589
590         cleanup(t);
591         return 0;
592 }
593
594
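/*
 * The four test_priority_* wrappers below build a minimal single-port,
 * single-queue configuration for each queue type (directed, atomic,
 * ordered, parallel), link the port where a load-balanced queue requires
 * it, and then run the shared run_prio_packet_test() check.
 */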
595 static int
596 test_priority_directed(struct test *t)
597 {
598         if (init(t, 1, 1) < 0 ||
599                         create_ports(t, 1) < 0 ||
600                         create_directed_qids(t, 1, t->port) < 0) {
601                 printf("%d: Error initializing device\n", __LINE__);
602                 return -1;
603         }
604
605         if (rte_event_dev_start(evdev) < 0) {
606                 printf("%d: Error with start call\n", __LINE__);
607                 return -1;
608         }
609
610         return run_prio_packet_test(t);
611 }
612
613 static int
614 test_priority_atomic(struct test *t)
615 {
616         if (init(t, 1, 1) < 0 ||
617                         create_ports(t, 1) < 0 ||
618                         create_atomic_qids(t, 1) < 0) {
619                 printf("%d: Error initializing device\n", __LINE__);
620                 return -1;
621         }
622
623         /* map the QID */
624         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
625                 printf("%d: error mapping qid to port\n", __LINE__);
626                 return -1;
627         }
628         if (rte_event_dev_start(evdev) < 0) {
629                 printf("%d: Error with start call\n", __LINE__);
630                 return -1;
631         }
632
633         return run_prio_packet_test(t);
634 }
635
636 static int
637 test_priority_ordered(struct test *t)
638 {
639         if (init(t, 1, 1) < 0 ||
640                         create_ports(t, 1) < 0 ||
641                         create_ordered_qids(t, 1) < 0) {
642                 printf("%d: Error initializing device\n", __LINE__);
643                 return -1;
644         }
645
646         /* map the QID */
647         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
648                 printf("%d: error mapping qid to port\n", __LINE__);
649                 return -1;
650         }
651         if (rte_event_dev_start(evdev) < 0) {
652                 printf("%d: Error with start call\n", __LINE__);
653                 return -1;
654         }
655
656         return run_prio_packet_test(t);
657 }
658
659 static int
660 test_priority_unordered(struct test *t)
661 {
662         if (init(t, 1, 1) < 0 ||
663                         create_ports(t, 1) < 0 ||
664                         create_unordered_qids(t, 1) < 0) {
665                 printf("%d: Error initializing device\n", __LINE__);
666                 return -1;
667         }
668
669         /* map the QID */
670         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
671                 printf("%d: error mapping qid to port\n", __LINE__);
672                 return -1;
673         }
674         if (rte_event_dev_start(evdev) < 0) {
675                 printf("%d: Error with start call\n", __LINE__);
676                 return -1;
677         }
678
679         return run_prio_packet_test(t);
680 }
681
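/*
 * Enqueue a small burst of NEW events alternating between two atomic
 * queues (each linked to its own port) and check that the scheduler
 * forwards every event and that each port dequeues exactly half of them.
 */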
682 static int
683 burst_packets(struct test *t)
684 {
685         /************** CONFIG ****************/
686         uint32_t i;
687         int err;
688         int ret;
689
690         /* Create instance with 2 ports and 2 queues */
691         if (init(t, 2, 2) < 0 ||
692                         create_ports(t, 2) < 0 ||
693                         create_atomic_qids(t, 2) < 0) {
694                 printf("%d: Error initializing device\n", __LINE__);
695                 return -1;
696         }
697
698         /* CQ mapping to QID */
699         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
700         if (ret != 1) {
701                 printf("%d: error mapping lb qid0\n", __LINE__);
702                 return -1;
703         }
704         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
705         if (ret != 1) {
706                 printf("%d: error mapping lb qid1\n", __LINE__);
707                 return -1;
708         }
709
710         if (rte_event_dev_start(evdev) < 0) {
711                 printf("%d: Error with start call\n", __LINE__);
712                 return -1;
713         }
714
715         /************** FORWARD ****************/
716         const uint32_t rx_port = 0;
717         const uint32_t NUM_PKTS = 2;
718
719         for (i = 0; i < NUM_PKTS; i++) {
720                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
721                 if (!arp) {
722                         printf("%d: error generating pkt\n", __LINE__);
723                         return -1;
724                 }
725
726                 struct rte_event ev = {
727                                 .op = RTE_EVENT_OP_NEW,
728                                 .queue_id = i % 2,
729                                 .flow_id = i % 3,
730                                 .mbuf = arp,
731                 };
732                 /* generate pkt and enqueue */
733                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
734                 if (err < 0) {
735                         printf("%d: Failed to enqueue\n", __LINE__);
736                         return -1;
737                 }
738         }
739         rte_event_schedule(evdev);
740
741         /* Check via stats that all NUM_PKTS reached the sched core */
742         struct test_event_dev_stats stats;
743
744         err = test_event_dev_stats_get(evdev, &stats);
745         if (err) {
746                 printf("%d: failed to get stats\n", __LINE__);
747                 return -1;
748         }
749         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
750                 printf("%d: Sched core didn't receive all %d pkts\n",
751                                 __LINE__, NUM_PKTS);
752                 rte_event_dev_dump(evdev, stdout);
753                 return -1;
754         }
755
756         uint32_t deq_pkts;
757         int p;
758
759         deq_pkts = 0;
760         /******** DEQ QID 1 *******/
761         do {
762                 struct rte_event ev;
763                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
764                 deq_pkts += p;
765                 rte_pktmbuf_free(ev.mbuf);
766         } while (p);
767
768         if (deq_pkts != NUM_PKTS/2) {
769                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
770                                 __LINE__);
771                 return -1;
772         }
773
774         /******** DEQ QID 2 *******/
775         deq_pkts = 0;
776         do {
777                 struct rte_event ev;
778                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
779                 deq_pkts += p;
780                 rte_pktmbuf_free(ev.mbuf);
781         } while (p);
782         if (deq_pkts != NUM_PKTS/2) {
783                 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
784                                 __LINE__);
785                 return -1;
786         }
787
788         cleanup(t);
789         return 0;
790 }
791
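/*
 * Enqueue a RELEASE operation with no preceding NEW event and verify the
 * scheduler neither counts it as traffic nor corrupts the inflight
 * accounting.
 */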
792 static int
793 abuse_inflights(struct test *t)
794 {
795         const int rx_enq = 0;
796         const int wrk_enq = 2;
797         int err;
798
799         /* Create instance with 4 ports */
800         if (init(t, 1, 4) < 0 ||
801                         create_ports(t, 4) < 0 ||
802                         create_atomic_qids(t, 1) < 0) {
803                 printf("%d: Error initializing device\n", __LINE__);
804                 return -1;
805         }
806
807         /* CQ mapping to QID */
808         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
809         if (err != 1) {
810                 printf("%d: error mapping lb qid\n", __LINE__);
811                 cleanup(t);
812                 return -1;
813         }
814
815         if (rte_event_dev_start(evdev) < 0) {
816                 printf("%d: Error with start call\n", __LINE__);
817                 return -1;
818         }
819
820         /* Enqueue op only */
821         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
822         if (err < 0) {
823                 printf("%d: Failed to enqueue\n", __LINE__);
824                 return -1;
825         }
826
827         /* schedule */
828         rte_event_schedule(evdev);
829
830         struct test_event_dev_stats stats;
831
832         err = test_event_dev_stats_get(evdev, &stats);
833         if (err) {
834                 printf("%d: failed to get stats\n", __LINE__);
835                 return -1;
836         }
837
838         if (stats.rx_pkts != 0 ||
839                         stats.tx_pkts != 0 ||
840                         stats.port_inflight[wrk_enq] != 0) {
841                 printf("%d: Sched core didn't handle pkt as expected\n",
842                                 __LINE__);
843                 return -1;
844         }
845
846         cleanup(t);
847         return 0;
848 }
849
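/*
 * Exercise the xstats API end to end: the number of device, port and
 * queue stats reported, the error handling for an invalid queue id, the
 * values after enqueuing three events, and that a reset clears only the
 * resettable counters.  The expected counts and values are specific to
 * the sw eventdev PMD.
 */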
850 static int
851 xstats_tests(struct test *t)
852 {
853         const int wrk_enq = 2;
854         int err;
855
856         /* Create instance with 4 ports */
857         if (init(t, 1, 4) < 0 ||
858                         create_ports(t, 4) < 0 ||
859                         create_atomic_qids(t, 1) < 0) {
860                 printf("%d: Error initializing device\n", __LINE__);
861                 return -1;
862         }
863
864         /* CQ mapping to QID */
865         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
866         if (err != 1) {
867                 printf("%d: error mapping lb qid\n", __LINE__);
868                 cleanup(t);
869                 return -1;
870         }
871
872         if (rte_event_dev_start(evdev) < 0) {
873                 printf("%d: Error with start call\n", __LINE__);
874                 return -1;
875         }
876
877         const uint32_t XSTATS_MAX = 1024;
878
879         uint32_t i;
880         uint32_t ids[XSTATS_MAX];
881         uint64_t values[XSTATS_MAX];
882         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
883
884         for (i = 0; i < XSTATS_MAX; i++)
885                 ids[i] = i;
886
887         /* Device names / values */
888         int ret = rte_event_dev_xstats_names_get(evdev,
889                                         RTE_EVENT_DEV_XSTATS_DEVICE,
890                                         0, xstats_names, ids, XSTATS_MAX);
891         if (ret != 6) {
892                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
893                 return -1;
894         }
895         ret = rte_event_dev_xstats_get(evdev,
896                                         RTE_EVENT_DEV_XSTATS_DEVICE,
897                                         0, ids, values, ret);
898         if (ret != 6) {
899                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
900                 return -1;
901         }
902
903         /* Port names / values */
904         ret = rte_event_dev_xstats_names_get(evdev,
905                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
906                                         xstats_names, ids, XSTATS_MAX);
907         if (ret != 21) {
908                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
909                 return -1;
910         }
911         ret = rte_event_dev_xstats_get(evdev,
912                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
913                                         ids, values, ret);
914         if (ret != 21) {
915                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
916                 return -1;
917         }
918
919         /* Queue names / values */
920         ret = rte_event_dev_xstats_names_get(evdev,
921                                         RTE_EVENT_DEV_XSTATS_QUEUE,
922                                         0, xstats_names, ids, XSTATS_MAX);
923         if (ret != 17) {
924                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
925                 return -1;
926         }
927
928         /* NEGATIVE TEST: passing an invalid queue id should return -EINVAL */
929         ret = rte_event_dev_xstats_get(evdev,
930                                         RTE_EVENT_DEV_XSTATS_QUEUE,
931                                         1, ids, values, ret);
932         if (ret != -EINVAL) {
933                 printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
934                 return -1;
935         }
936
937         ret = rte_event_dev_xstats_get(evdev,
938                                         RTE_EVENT_DEV_XSTATS_QUEUE,
939                                         0, ids, values, ret);
940         if (ret != 17) {
941                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
942                 return -1;
943         }
944
945         /* enqueue packets to check values */
946         for (i = 0; i < 3; i++) {
947                 struct rte_event ev;
948                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
949                 if (!arp) {
950                         printf("%d: gen of pkt failed\n", __LINE__);
951                         return -1;
952                 }
953                 ev.queue_id = t->qid[i];
954                 ev.op = RTE_EVENT_OP_NEW;
955                 ev.mbuf = arp;
956                 ev.flow_id = 7;
957                 arp->seqn = i;
958
959                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
960                 if (err != 1) {
961                         printf("%d: Failed to enqueue\n", __LINE__);
962                         return -1;
963                 }
964         }
965
966         rte_event_schedule(evdev);
967
968         /* Device names / values */
969         int num_stats = rte_event_dev_xstats_names_get(evdev,
970                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
971                                         xstats_names, ids, XSTATS_MAX);
972         if (num_stats < 0)
973                 goto fail;
974         ret = rte_event_dev_xstats_get(evdev,
975                                         RTE_EVENT_DEV_XSTATS_DEVICE,
976                                         0, ids, values, num_stats);
977         static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
978         for (i = 0; (signed int)i < ret; i++) {
979                 if (expected[i] != values[i]) {
980                         printf(
981                                 "%d Error xstat %d (id %d) %s : %"PRIu64
982                                 ", expect %"PRIu64"\n",
983                                 __LINE__, i, ids[i], xstats_names[i].name,
984                                 values[i], expected[i]);
985                         goto fail;
986                 }
987         }
988
989         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
990                                         0, NULL, 0);
991
992         /* ensure reset statistics are zeroed */
993         static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
994         ret = rte_event_dev_xstats_get(evdev,
995                                         RTE_EVENT_DEV_XSTATS_DEVICE,
996                                         0, ids, values, num_stats);
997         for (i = 0; (signed int)i < ret; i++) {
998                 if (expected_zero[i] != values[i]) {
999                         printf(
1000                                 "%d Error, xstat %d (id %d) %s : %"PRIu64
1001                                 ", expect %"PRIu64"\n",
1002                                 __LINE__, i, ids[i], xstats_names[i].name,
1003                                 values[i], expected_zero[i]);
1004                         goto fail;
1005                 }
1006         }
1007
1008         /* port reset checks */
1009         num_stats = rte_event_dev_xstats_names_get(evdev,
1010                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
1011                                         xstats_names, ids, XSTATS_MAX);
1012         if (num_stats < 0)
1013                 goto fail;
1014         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1015                                         0, ids, values, num_stats);
1016
1017         static const uint64_t port_expected[] = {
1018                 3 /* rx */,
1019                 0 /* tx */,
1020                 0 /* drop */,
1021                 0 /* inflights */,
1022                 0 /* avg pkt cycles */,
1023                 29 /* credits */,
1024                 0 /* rx ring used */,
1025                 4096 /* rx ring free */,
1026                 0 /* cq ring used */,
1027                 32 /* cq ring free */,
1028                 0 /* dequeue calls */,
1029                 /* 10 dequeue burst buckets */
1030                 0, 0, 0, 0, 0,
1031                 0, 0, 0, 0, 0,
1032         };
1033         if (ret != RTE_DIM(port_expected)) {
1034                 printf(
1035                         "%s %d: wrong number of port stats (%d), expected %zu\n",
1036                         __func__, __LINE__, ret, RTE_DIM(port_expected));
1037         }
1038
1039         for (i = 0; (signed int)i < ret; i++) {
1040                 if (port_expected[i] != values[i]) {
1041                         printf(
1042                                 "%s : %d: Error stat %s is %"PRIu64
1043                                 ", expected %"PRIu64"\n",
1044                                 __func__, __LINE__, xstats_names[i].name,
1045                                 values[i], port_expected[i]);
1046                         goto fail;
1047                 }
1048         }
1049
1050         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1051                                         0, NULL, 0);
1052
1053         /* ensure reset statistics are zeroed */
1054         static const uint64_t port_expected_zero[] = {
1055                 0 /* rx */,
1056                 0 /* tx */,
1057                 0 /* drop */,
1058                 0 /* inflights */,
1059                 0 /* avg pkt cycles */,
1060                 29 /* credits */,
1061                 0 /* rx ring used */,
1062                 4096 /* rx ring free */,
1063                 0 /* cq ring used */,
1064                 32 /* cq ring free */,
1065                 0 /* dequeue calls */,
1066                 /* 10 dequeue burst buckets */
1067                 0, 0, 0, 0, 0,
1068                 0, 0, 0, 0, 0,
1069         };
1070         ret = rte_event_dev_xstats_get(evdev,
1071                                         RTE_EVENT_DEV_XSTATS_PORT,
1072                                         0, ids, values, num_stats);
1073         for (i = 0; (signed int)i < ret; i++) {
1074                 if (port_expected_zero[i] != values[i]) {
1075                         printf(
1076                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1077                                 ", expect %"PRIu64"\n",
1078                                 __LINE__, i, ids[i], xstats_names[i].name,
1079                                 values[i], port_expected_zero[i]);
1080                         goto fail;
1081                 }
1082         }
1083
1084         /* QUEUE STATS TESTS */
1085         num_stats = rte_event_dev_xstats_names_get(evdev,
1086                                                 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1087                                                 xstats_names, ids, XSTATS_MAX);
1088         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1089                                         0, ids, values, num_stats);
1090         if (ret < 0) {
1091                 printf("xstats get returned %d\n", ret);
1092                 goto fail;
1093         }
1094         if ((unsigned int)ret > XSTATS_MAX)
1095                 printf("%s %d: more xstats available than space\n",
1096                                 __func__, __LINE__);
1097
1098         static const uint64_t queue_expected[] = {
1099                 3 /* rx */,
1100                 3 /* tx */,
1101                 0 /* drop */,
1102                 3 /* inflights */,
1103                 512 /* iq size */,
1104                 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1105                 /* QID-to-Port: pinned_flows, packets */
1106                 0, 0,
1107                 0, 0,
1108                 1, 3,
1109                 0, 0,
1110         };
1111         for (i = 0; (signed int)i < ret; i++) {
1112                 if (queue_expected[i] != values[i]) {
1113                         printf(
1114                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1115                                 ", expect %"PRIu64"\n",
1116                                 __LINE__, i, ids[i], xstats_names[i].name,
1117                                 values[i], queue_expected[i]);
1118                         goto fail;
1119                 }
1120         }
1121
1122         /* Reset the queue stats here */
1123         ret = rte_event_dev_xstats_reset(evdev,
1124                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1125                                         NULL,
1126                                         0);
1127
1128         /* Verify that the resettable stats are reset, and others are not */
1129         static const uint64_t queue_expected_zero[] = {
1130                 0 /* rx */,
1131                 0 /* tx */,
1132                 0 /* drop */,
1133                 3 /* inflight */,
1134                 512 /* iq size */,
1135                 0, 0, 0, 0, /* 4 iq used */
1136                 /* QID-to-Port: pinned_flows, packets */
1137                 0, 0,
1138                 0, 0,
1139                 1, 0,
1140                 0, 0,
1141         };
1142
1143         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1144                                         ids, values, num_stats);
1145         int fails = 0;
1146         for (i = 0; (signed int)i < ret; i++) {
1147                 if (queue_expected_zero[i] != values[i]) {
1148                         printf(
1149                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1150                                 ", expect %"PRIu64"\n",
1151                                 __LINE__, i, ids[i], xstats_names[i].name,
1152                                 values[i], queue_expected_zero[i]);
1153                         fails++;
1154                 }
1155         }
1156         if (fails) {
1157                 printf("%d : %d of values were not as expected above\n",
1158                                 __LINE__, fails);
1159                 goto fail;
1160         }
1161
1162         cleanup(t);
1163         return 0;
1164
1165 fail:
1166         rte_event_dev_dump(0, stdout);
1167         cleanup(t);
1168         return -1;
1169 }
1170
1171
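/*
 * Pass deliberately out-of-range port and queue ids to the xstats names
 * API and check that the driver reports zero stats rather than
 * misbehaving.
 */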
1172 static int
1173 xstats_id_abuse_tests(struct test *t)
1174 {
1175         int err;
1176         const uint32_t XSTATS_MAX = 1024;
1177         const uint32_t link_port = 2;
1178
1179         uint32_t ids[XSTATS_MAX];
1180         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1181
1182         /* Create instance with 4 ports */
1183         if (init(t, 1, 4) < 0 ||
1184                         create_ports(t, 4) < 0 ||
1185                         create_atomic_qids(t, 1) < 0) {
1186                 printf("%d: Error initializing device\n", __LINE__);
1187                 goto fail;
1188         }
1189
1190         err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1191         if (err != 1) {
1192                 printf("%d: error mapping lb qid\n", __LINE__);
1193                 goto fail;
1194         }
1195
1196         if (rte_event_dev_start(evdev) < 0) {
1197                 printf("%d: Error with start call\n", __LINE__);
1198                 goto fail;
1199         }
1200
1201         /* no test for device, as it ignores the port/q number */
1202         int num_stats = rte_event_dev_xstats_names_get(evdev,
1203                                         RTE_EVENT_DEV_XSTATS_PORT,
1204                                         UINT8_MAX-1, xstats_names, ids,
1205                                         XSTATS_MAX);
1206         if (num_stats != 0) {
1207                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1208                                 0, num_stats);
1209                 goto fail;
1210         }
1211
1212         num_stats = rte_event_dev_xstats_names_get(evdev,
1213                                         RTE_EVENT_DEV_XSTATS_QUEUE,
1214                                         UINT8_MAX-1, xstats_names, ids,
1215                                         XSTATS_MAX);
1216         if (num_stats != 0) {
1217                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1218                                 0, num_stats);
1219                 goto fail;
1220         }
1221
1222         cleanup(t);
1223         return 0;
1224 fail:
1225         cleanup(t);
1226         return -1;
1227 }
1228
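/*
 * Stop, reconfigure and restart a single port/queue pair 32 times,
 * pushing one packet through on each iteration.  This is intended to
 * catch per-port state (credits, inflight counts) surviving a
 * reconfiguration, which would eventually make an enqueue or dequeue
 * fail.
 */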
1229 static int
1230 port_reconfig_credits(struct test *t)
1231 {
1232         if (init(t, 1, 1) < 0) {
1233                 printf("%d: Error initializing device\n", __LINE__);
1234                 return -1;
1235         }
1236
1237         uint32_t i;
1238         const uint32_t NUM_ITERS = 32;
1239         for (i = 0; i < NUM_ITERS; i++) {
1240                 const struct rte_event_queue_conf conf = {
1241                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1242                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1243                         .nb_atomic_flows = 1024,
1244                         .nb_atomic_order_sequences = 1024,
1245                 };
1246                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1247                         printf("%d: error creating qid\n", __LINE__);
1248                         return -1;
1249                 }
1250                 t->qid[0] = 0;
1251
1252                 static const struct rte_event_port_conf port_conf = {
1253                                 .new_event_threshold = 128,
1254                                 .dequeue_depth = 32,
1255                                 .enqueue_depth = 64,
1256                 };
1257                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1258                         printf("%d Error setting up port\n", __LINE__);
1259                         return -1;
1260                 }
1261
1262                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1263                 if (links != 1) {
1264                         printf("%d: error mapping lb qid\n", __LINE__);
1265                         goto fail;
1266                 }
1267
1268                 if (rte_event_dev_start(evdev) < 0) {
1269                         printf("%d: Error with start call\n", __LINE__);
1270                         goto fail;
1271                 }
1272
1273                 const uint32_t NPKTS = 1;
1274                 uint32_t j;
1275                 for (j = 0; j < NPKTS; j++) {
1276                         struct rte_event ev;
1277                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1278                         if (!arp) {
1279                                 printf("%d: gen of pkt failed\n", __LINE__);
1280                                 goto fail;
1281                         }
1282                         ev.queue_id = t->qid[0];
1283                         ev.op = RTE_EVENT_OP_NEW;
1284                         ev.mbuf = arp;
1285                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1286                         if (err != 1) {
1287                                 printf("%d: Failed to enqueue\n", __LINE__);
1288                                 rte_event_dev_dump(0, stdout);
1289                                 goto fail;
1290                         }
1291                 }
1292
1293                 rte_event_schedule(evdev);
1294
1295                 struct rte_event ev[NPKTS];
1296                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1297                                                         NPKTS, 0);
1298                 if (deq != 1)
1299                         printf("%d error; no packet dequeued\n", __LINE__);
1300
1301                 /* let cleanup below stop the device on last iter */
1302                 if (i != NUM_ITERS-1)
1303                         rte_event_dev_stop(evdev);
1304         }
1305
1306         cleanup(t);
1307         return 0;
1308 fail:
1309         cleanup(t);
1310         return -1;
1311 }
1312
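/*
 * Re-link port 0 from the load-balanced queue to the single-link queue
 * (and give the load-balanced queue to port 1) before starting the
 * device, checking that links can be moved between queue types at
 * configuration time.
 */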
1313 static int
1314 port_single_lb_reconfig(struct test *t)
1315 {
1316         if (init(t, 2, 2) < 0) {
1317                 printf("%d: Error initializing device\n", __LINE__);
1318                 goto fail;
1319         }
1320
1321         static const struct rte_event_queue_conf conf_lb_atomic = {
1322                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1323                 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1324                 .nb_atomic_flows = 1024,
1325                 .nb_atomic_order_sequences = 1024,
1326         };
1327         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1328                 printf("%d: error creating qid\n", __LINE__);
1329                 goto fail;
1330         }
1331
1332         static const struct rte_event_queue_conf conf_single_link = {
1333                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1334                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1335         };
1336         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1337                 printf("%d: error creating qid\n", __LINE__);
1338                 goto fail;
1339         }
1340
1341         struct rte_event_port_conf port_conf = {
1342                 .new_event_threshold = 128,
1343                 .dequeue_depth = 32,
1344                 .enqueue_depth = 64,
1345         };
1346         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1347                 printf("%d Error setting up port\n", __LINE__);
1348                 goto fail;
1349         }
1350         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1351                 printf("%d Error setting up port\n", __LINE__);
1352                 goto fail;
1353         }
1354
1355         /* link port to lb queue */
1356         uint8_t queue_id = 0;
1357         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1358                 printf("%d: error creating link for qid\n", __LINE__);
1359                 goto fail;
1360         }
1361
1362         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1363         if (ret != 1) {
1364                 printf("%d: Error unlinking lb port\n", __LINE__);
1365                 goto fail;
1366         }
1367
1368         queue_id = 1;
1369         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1370                 printf("%d: error creating link for qid\n", __LINE__);
1371                 goto fail;
1372         }
1373
1374         queue_id = 0;
1375         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1376         if (err != 1) {
1377                 printf("%d: error mapping lb qid\n", __LINE__);
1378                 goto fail;
1379         }
1380
1381         if (rte_event_dev_start(evdev) < 0) {
1382                 printf("%d: Error with start call\n", __LINE__);
1383                 goto fail;
1384         }
1385
1386         cleanup(t);
1387         return 0;
1388 fail:
1389         cleanup(t);
1390         return -1;
1391 }
1392
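/*
 * Brute-force the xstats API: query names and values for every stats
 * mode and every possible port/queue id without checking the results.
 * The test passes as long as nothing crashes.
 */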
1393 static int
1394 xstats_brute_force(struct test *t)
1395 {
1396         uint32_t i;
1397         const uint32_t XSTATS_MAX = 1024;
1398         uint32_t ids[XSTATS_MAX];
1399         uint64_t values[XSTATS_MAX];
1400         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1401
1402
1403         /* Create instance with 4 ports */
1404         if (init(t, 1, 4) < 0 ||
1405                         create_ports(t, 4) < 0 ||
1406                         create_atomic_qids(t, 1) < 0) {
1407                 printf("%d: Error initializing device\n", __LINE__);
1408                 return -1;
1409         }
1410
1411         int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1412         if (err != 1) {
1413                 printf("%d: error mapping lb qid\n", __LINE__);
1414                 goto fail;
1415         }
1416
1417         if (rte_event_dev_start(evdev) < 0) {
1418                 printf("%d: Error with start call\n", __LINE__);
1419                 goto fail;
1420         }
1421
1422         for (i = 0; i < XSTATS_MAX; i++)
1423                 ids[i] = i;
1424
1425         for (i = 0; i < 3; i++) {
1426                 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1427                 uint32_t j;
1428                 for (j = 0; j < UINT8_MAX; j++) {
1429                         rte_event_dev_xstats_names_get(evdev, mode,
1430                                 j, xstats_names, ids, XSTATS_MAX);
1431
1432                         rte_event_dev_xstats_get(evdev, mode, j, ids,
1433                                                  values, XSTATS_MAX);
1434                 }
1435         }
1436
1437         cleanup(t);
1438         return 0;
1439 fail:
1440         cleanup(t);
1441         return -1;
1442 }
1443
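/*
 * Check that the ids returned by the by-name xstats lookups are stable
 * and that the values match what was enqueued; as the name suggests, the
 * rest of the test exercises resetting stats by id.
 */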
1444 static int
1445 xstats_id_reset_tests(struct test *t)
1446 {
1447         const int wrk_enq = 2;
1448         int err;
1449
1450         /* Create instance with 4 ports */
1451         if (init(t, 1, 4) < 0 ||
1452                         create_ports(t, 4) < 0 ||
1453                         create_atomic_qids(t, 1) < 0) {
1454                 printf("%d: Error initializing device\n", __LINE__);
1455                 return -1;
1456         }
1457
1458         /* CQ mapping to QID */
1459         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1460         if (err != 1) {
1461                 printf("%d: error mapping lb qid\n", __LINE__);
1462                 goto fail;
1463         }
1464
1465         if (rte_event_dev_start(evdev) < 0) {
1466                 printf("%d: Error with start call\n", __LINE__);
1467                 goto fail;
1468         }
1469
1470 #define XSTATS_MAX 1024
1471         int ret;
1472         uint32_t i;
1473         uint32_t ids[XSTATS_MAX];
1474         uint64_t values[XSTATS_MAX];
1475         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1476
1477         for (i = 0; i < XSTATS_MAX; i++)
1478                 ids[i] = i;
1479
1480 #define NUM_DEV_STATS 6
1481         /* Device names / values */
1482         int num_stats = rte_event_dev_xstats_names_get(evdev,
1483                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1484                                         0, xstats_names, ids, XSTATS_MAX);
1485         if (num_stats != NUM_DEV_STATS) {
1486                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1487                                 NUM_DEV_STATS, num_stats);
1488                 goto fail;
1489         }
1490         ret = rte_event_dev_xstats_get(evdev,
1491                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1492                                         0, ids, values, num_stats);
1493         if (ret != NUM_DEV_STATS) {
1494                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1495                                 NUM_DEV_STATS, ret);
1496                 goto fail;
1497         }
1498
1499 #define NPKTS 7
1500         for (i = 0; i < NPKTS; i++) {
1501                 struct rte_event ev;
1502                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1503                 if (!arp) {
1504                         printf("%d: gen of pkt failed\n", __LINE__);
1505                         goto fail;
1506                 }
1507                 ev.queue_id = t->qid[0];
1508                 ev.op = RTE_EVENT_OP_NEW;
1509                 ev.mbuf = arp;
1510                 arp->seqn = i;
1511
1512                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1513                 if (err != 1) {
1514                         printf("%d: Failed to enqueue\n", __LINE__);
1515                         goto fail;
1516                 }
1517         }
1518
1519         rte_event_schedule(evdev);
1520
1521         static const char * const dev_names[] = {
1522                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1523                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1524         };
1525         uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1526         for (i = 0; (int)i < ret; i++) {
1527                 unsigned int id;
1528                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1529                                                                 dev_names[i],
1530                                                                 &id);
1531                 if (id != i) {
1532                         printf("%d: %s id incorrect, expected %d got %d\n",
1533                                         __LINE__, dev_names[i], i, id);
1534                         goto fail;
1535                 }
1536                 if (val != dev_expected[i]) {
1537                         printf("%d: %s value incorrect, expected %"PRIu64
1538                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1539                                 dev_expected[i], val);
1540                         goto fail;
1541                 }
1542                 /* reset to zero */
1543                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1544                                                 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1545                                                 &id,
1546                                                 1);
1547                 if (reset_ret) {
1548                         printf("%d: failed to reset successfully\n", __LINE__);
1549                         goto fail;
1550                 }
1551                 dev_expected[i] = 0;
1552                 /* check value again */
1553                 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1554                 if (val != dev_expected[i]) {
1555                         printf("%d: %s value incorrect, expected %"PRIu64
1556                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1557                                 dev_expected[i], val);
1558                         goto fail;
1559                 }
1560         }
1561
1562 /* 48 is the stat offset from the start of the device's whole xstats.
1563  * This WILL break every time we add a statistic to a port
1564  * or the device, but there is no other way to test the absolute IDs.
1565  */
1566 #define PORT_OFF 48
1567 /* num stats for the tested port. CQ size adds more stats to a port */
1568 #define NUM_PORT_STATS 21
1569 /* the port to test. */
1570 #define PORT 2
1571         num_stats = rte_event_dev_xstats_names_get(evdev,
1572                                         RTE_EVENT_DEV_XSTATS_PORT, PORT,
1573                                         xstats_names, ids, XSTATS_MAX);
1574         if (num_stats != NUM_PORT_STATS) {
1575                 printf("%d: expected %d stats, got return %d\n",
1576                         __LINE__, NUM_PORT_STATS, num_stats);
1577                 goto fail;
1578         }
1579         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1580                                         ids, values, num_stats);
1581
1582         if (ret != NUM_PORT_STATS) {
1583                 printf("%d: expected %d stats, got return %d\n",
1584                                 __LINE__, NUM_PORT_STATS, ret);
1585                 goto fail;
1586         }
1587         static const char * const port_names[] = {
1588                 "port_2_rx",
1589                 "port_2_tx",
1590                 "port_2_drop",
1591                 "port_2_inflight",
1592                 "port_2_avg_pkt_cycles",
1593                 "port_2_credits",
1594                 "port_2_rx_ring_used",
1595                 "port_2_rx_ring_free",
1596                 "port_2_cq_ring_used",
1597                 "port_2_cq_ring_free",
1598                 "port_2_dequeue_calls",
1599                 "port_2_dequeues_returning_0",
1600                 "port_2_dequeues_returning_1-4",
1601                 "port_2_dequeues_returning_5-8",
1602                 "port_2_dequeues_returning_9-12",
1603                 "port_2_dequeues_returning_13-16",
1604                 "port_2_dequeues_returning_17-20",
1605                 "port_2_dequeues_returning_21-24",
1606                 "port_2_dequeues_returning_25-28",
1607                 "port_2_dequeues_returning_29-32",
1608                 "port_2_dequeues_returning_33-36",
1609         };
1610         uint64_t port_expected[] = {
1611                 0, /* rx */
1612                 NPKTS, /* tx */
1613                 0, /* drop */
1614                 NPKTS, /* inflight */
1615                 0, /* avg pkt cycles */
1616                 0, /* credits */
1617                 0, /* rx ring used */
1618                 4096, /* rx ring free */
1619                 NPKTS,  /* cq ring used */
1620                 25, /* cq ring free */
1621                 0, /* dequeue zero calls */
1622                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1623                 0, 0, 0, 0, 0,
1624         };
1625         uint64_t port_expected_zero[] = {
1626                 0, /* rx */
1627                 0, /* tx */
1628                 0, /* drop */
1629                 NPKTS, /* inflight */
1630                 0, /* avg pkt cycles */
1631                 0, /* credits */
1632                 0, /* rx ring used */
1633                 4096, /* rx ring free */
1634                 NPKTS,  /* cq ring used */
1635                 25, /* cq ring free */
1636                 0, /* dequeue zero calls */
1637                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1638                 0, 0, 0, 0, 0,
1639         };
1640         if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1641                         RTE_DIM(port_names) != NUM_PORT_STATS) {
1642                 printf("%d: port array of wrong size\n", __LINE__);
1643                 goto fail;
1644         }
1645
1646         int failed = 0;
1647         for (i = 0; (int)i < ret; i++) {
1648                 unsigned int id;
1649                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1650                                                                 port_names[i],
1651                                                                 &id);
1652                 if (id != i + PORT_OFF) {
1653                         printf("%d: %s id incorrect, expected %d got %d\n",
1654                                         __LINE__, port_names[i], i+PORT_OFF,
1655                                         id);
1656                         failed = 1;
1657                 }
1658                 if (val != port_expected[i]) {
1659                         printf("%d: %s value incorrect, expected %"PRIu64
1660                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1661                                 port_expected[i], val);
1662                         failed = 1;
1663                 }
1664                 /* reset to zero */
1665                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1666                                                 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1667                                                 &id,
1668                                                 1);
1669                 if (reset_ret) {
1670                         printf("%d: failed to reset successfully\n", __LINE__);
1671                         failed = 1;
1672                 }
1673                 /* check value again */
1674                 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1675                 if (val != port_expected_zero[i]) {
1676                         printf("%d: %s value incorrect, expected %"PRIu64
1677                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1678                                 port_expected_zero[i], val);
1679                         failed = 1;
1680                 }
1681         }
1682         if (failed)
1683                 goto fail;
1684
1685 /* num queue stats */
1686 #define NUM_Q_STATS 17
1687 /* queue offset from the start of the device's whole xstats.
1688  * This will break every time we add a statistic to a device/port/queue
1689  */
1690 #define QUEUE_OFF 90
1691         const uint32_t queue = 0;
1692         num_stats = rte_event_dev_xstats_names_get(evdev,
1693                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1694                                         xstats_names, ids, XSTATS_MAX);
1695         if (num_stats != NUM_Q_STATS) {
1696                 printf("%d: expected %d stats, got return %d\n",
1697                         __LINE__, NUM_Q_STATS, num_stats);
1698                 goto fail;
1699         }
1700         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1701                                         queue, ids, values, num_stats);
1702         if (ret != NUM_Q_STATS) {
1703                 printf("%d: expected %d stats, got return %d\n",
                        __LINE__, NUM_Q_STATS, ret);
1704                 goto fail;
1705         }
1706         static const char * const queue_names[] = {
1707                 "qid_0_rx",
1708                 "qid_0_tx",
1709                 "qid_0_drop",
1710                 "qid_0_inflight",
1711                 "qid_0_iq_size",
1712                 "qid_0_iq_0_used",
1713                 "qid_0_iq_1_used",
1714                 "qid_0_iq_2_used",
1715                 "qid_0_iq_3_used",
1716                 "qid_0_port_0_pinned_flows",
1717                 "qid_0_port_0_packets",
1718                 "qid_0_port_1_pinned_flows",
1719                 "qid_0_port_1_packets",
1720                 "qid_0_port_2_pinned_flows",
1721                 "qid_0_port_2_packets",
1722                 "qid_0_port_3_pinned_flows",
1723                 "qid_0_port_3_packets",
1724         };
1725         uint64_t queue_expected[] = {
1726                 7, /* rx */
1727                 7, /* tx */
1728                 0, /* drop */
1729                 7, /* inflight */
1730                 512, /* iq size */
1731                 0, /* iq 0 used */
1732                 0, /* iq 1 used */
1733                 0, /* iq 2 used */
1734                 0, /* iq 3 used */
1735                 /* QID-to-Port: pinned_flows, packets */
1736                 0, 0,
1737                 0, 0,
1738                 1, 7,
1739                 0, 0,
1740         };
1741         uint64_t queue_expected_zero[] = {
1742                 0, /* rx */
1743                 0, /* tx */
1744                 0, /* drop */
1745                 7, /* inflight */
1746                 512, /* iq size */
1747                 0, /* iq 0 used */
1748                 0, /* iq 1 used */
1749                 0, /* iq 2 used */
1750                 0, /* iq 3 used */
1751                 /* QID-to-Port: pinned_flows, packets */
1752                 0, 0,
1753                 0, 0,
1754                 1, 0,
1755                 0, 0,
1756         };
1757         if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1758                         RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1759                         RTE_DIM(queue_names) != NUM_Q_STATS) {
1760                 printf("%d: queue array of wrong size\n", __LINE__);
1761                 goto fail;
1762         }
1763
1764         failed = 0;
1765         for (i = 0; (int)i < ret; i++) {
1766                 unsigned int id;
1767                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1768                                                                 queue_names[i],
1769                                                                 &id);
1770                 if (id != i + QUEUE_OFF) {
1771                         printf("%d: %s id incorrect, expected %d got %d\n",
1772                                         __LINE__, queue_names[i], i+QUEUE_OFF,
1773                                         id);
1774                         failed = 1;
1775                 }
1776                 if (val != queue_expected[i]) {
1777                         printf("%d: %s value incorrect, expected %"PRIu64
1778                                 " got %"PRIu64"\n", __LINE__,
1779                                 queue_names[i], queue_expected[i], val);
1780                         failed = 1;
1781                 }
1782                 /* reset to zero */
1783                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1784                                                 RTE_EVENT_DEV_XSTATS_QUEUE,
1785                                                 queue, &id, 1);
1786                 if (reset_ret) {
1787                         printf("%d: failed to reset successfully\n", __LINE__);
1788                         failed = 1;
1789                 }
1790                 /* check value again */
1791                 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1792                                                         0);
1793                 if (val != queue_expected_zero[i]) {
1794                         printf("%d: %s value incorrect, expected %"PRIu64
1795                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1796                                 queue_expected_zero[i], val);
1797                         failed = 1;
1798                 }
1799         }
1800
1801         if (failed)
1802                 goto fail;
1803
1804         cleanup(t);
1805         return 0;
1806 fail:
1807         cleanup(t);
1808         return -1;
1809 }
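
/*
 * Illustrative sketch (editorial addition): the per-id reset pattern used
 * repeatedly in xstats_id_reset_tests() above, condensed into a hypothetical
 * helper. It looks up one counter by name, resets just that id, and returns
 * the re-read value, which should normally be zero afterwards.
 */
static inline uint64_t
example_reset_one_xstat(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id, const char *name)
{
        unsigned int id;

        /* look up the xstat id behind this name (ids are absolute/global) */
        rte_event_dev_xstats_by_name_get(dev_id, name, &id);
        /* reset only this one counter */
        if (rte_event_dev_xstats_reset(dev_id, mode, queue_port_id, &id, 1))
                printf("reset of %s failed\n", name);
        /* read it back; resettable counters read 0 after a reset */
        return rte_event_dev_xstats_by_name_get(dev_id, name, NULL);
}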
1810
1811 static int
1812 ordered_reconfigure(struct test *t)
1813 {
1814         if (init(t, 1, 1) < 0 ||
1815                         create_ports(t, 1) < 0) {
1816                 printf("%d: Error initializing device\n", __LINE__);
1817                 return -1;
1818         }
1819
1820         const struct rte_event_queue_conf conf = {
1821                         .schedule_type = RTE_SCHED_TYPE_ORDERED,
1822                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1823                         .nb_atomic_flows = 1024,
1824                         .nb_atomic_order_sequences = 1024,
1825         };
1826
1827         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1828                 printf("%d: error creating qid\n", __LINE__);
1829                 goto failed;
1830         }
1831
1832         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1833                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1834                 goto failed;
1835         }
1836
1837         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1838         if (rte_event_dev_start(evdev) < 0) {
1839                 printf("%d: Error with start call\n", __LINE__);
1840                 return -1;
1841         }
1842
1843         cleanup(t);
1844         return 0;
1845 failed:
1846         cleanup(t);
1847         return -1;
1848 }
1849
1850 static int
1851 qid_priorities(struct test *t)
1852 {
1853         /* Test works by having a CQ with enough empty space for all packets,
1854          * and enqueueing 3 packets to 3 QIDs. They must return based on the
1855          * priority of the QID, not the ingress order, to pass the test
1856          */
1857         unsigned int i;
1858         /* Create instance with 1 port and 3 QIDs */
1859         if (init(t, 3, 1) < 0 ||
1860                         create_ports(t, 1) < 0) {
1861                 printf("%d: Error initializing device\n", __LINE__);
1862                 return -1;
1863         }
1864
1865         for (i = 0; i < 3; i++) {
1866                 /* Create QID */
1867                 const struct rte_event_queue_conf conf = {
1868                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1869                         /* increase priority (0 == highest), as we go */
1870                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1871                         .nb_atomic_flows = 1024,
1872                         .nb_atomic_order_sequences = 1024,
1873                 };
1874
1875                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1876                         printf("%d: error creating qid %d\n", __LINE__, i);
1877                         return -1;
1878                 }
1879                 t->qid[i] = i;
1880         }
1881         t->nb_qids = i;
1882         /* map all QIDs to port */
1883         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1884
1885         if (rte_event_dev_start(evdev) < 0) {
1886                 printf("%d: Error with start call\n", __LINE__);
1887                 return -1;
1888         }
1889
1890         /* enqueue 3 packets, setting seqn and QID to check priority */
1891         for (i = 0; i < 3; i++) {
1892                 struct rte_event ev;
1893                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1894                 if (!arp) {
1895                         printf("%d: gen of pkt failed\n", __LINE__);
1896                         return -1;
1897                 }
1898                 ev.queue_id = t->qid[i];
1899                 ev.op = RTE_EVENT_OP_NEW;
1900                 ev.mbuf = arp;
1901                 arp->seqn = i;
1902
1903                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1904                 if (err != 1) {
1905                         printf("%d: Failed to enqueue\n", __LINE__);
1906                         return -1;
1907                 }
1908         }
1909
1910         rte_event_schedule(evdev);
1911
1912         /* dequeue packets, verify priority was upheld */
1913         struct rte_event ev[32];
1914         uint32_t deq_pkts =
1915                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1916         if (deq_pkts != 3) {
1917                 printf("%d: failed to deq packets\n", __LINE__);
1918                 rte_event_dev_dump(evdev, stdout);
1919                 return -1;
1920         }
1921         for (i = 0; i < 3; i++) {
1922                 if (ev[i].mbuf->seqn != 2-i) {
1923                         printf(
1924                                 "%d: qid priority test: seqn %d incorrectly prioritized\n",
1925                                         __LINE__, i);
1926                 }
1927         }
1928
1929         cleanup(t);
1930         return 0;
1931 }
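
/*
 * Illustrative sketch (editorial addition): the queue-creation loop from
 * qid_priorities() above as a hypothetical helper - a group of atomic QIDs
 * whose scheduling priority rises with the index (0 is
 * RTE_EVENT_DEV_PRIORITY_HIGHEST, so subtracting from NORMAL raises it).
 */
static inline int
example_create_prioritized_qids(uint8_t dev_id, unsigned int nb_queues)
{
        unsigned int i;

        for (i = 0; i < nb_queues; i++) {
                const struct rte_event_queue_conf conf = {
                        .schedule_type = RTE_SCHED_TYPE_ATOMIC,
                        /* lower numeric value == higher priority */
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
                        .nb_atomic_flows = 1024,
                        .nb_atomic_order_sequences = 1024,
                };
                if (rte_event_queue_setup(dev_id, i, &conf) < 0)
                        return -1;
        }
        return 0;
}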
1932
1933 static int
1934 load_balancing(struct test *t)
1935 {
1936         const int rx_enq = 0;
1937         int err;
1938         uint32_t i;
1939
1940         if (init(t, 1, 4) < 0 ||
1941                         create_ports(t, 4) < 0 ||
1942                         create_atomic_qids(t, 1) < 0) {
1943                 printf("%d: Error initializing device\n", __LINE__);
1944                 return -1;
1945         }
1946
1947         for (i = 0; i < 3; i++) {
1948                 /* map port 1 - 3 inclusive */
1949                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1950                                 NULL, 1) != 1) {
1951                         printf("%d: error mapping qid to port %d\n",
1952                                         __LINE__, i);
1953                         return -1;
1954                 }
1955         }
1956
1957         if (rte_event_dev_start(evdev) < 0) {
1958                 printf("%d: Error with start call\n", __LINE__);
1959                 return -1;
1960         }
1961
1962         /************** FORWARD ****************/
1963         /*
1964          * Create a set of flows that test the load-balancing operation of the
1965          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1966          * with a new flow, which should be sent to the 3rd mapped CQ
1967          */
1968         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1969
1970         for (i = 0; i < RTE_DIM(flows); i++) {
1971                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1972                 if (!arp) {
1973                         printf("%d: gen of pkt failed\n", __LINE__);
1974                         return -1;
1975                 }
1976
1977                 struct rte_event ev = {
1978                                 .op = RTE_EVENT_OP_NEW,
1979                                 .queue_id = t->qid[0],
1980                                 .flow_id = flows[i],
1981                                 .mbuf = arp,
1982                 };
1983                 /* generate pkt and enqueue */
1984                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1985                 if (err < 0) {
1986                         printf("%d: Failed to enqueue\n", __LINE__);
1987                         return -1;
1988                 }
1989         }
1990
1991         rte_event_schedule(evdev);
1992
1993         struct test_event_dev_stats stats;
1994         err = test_event_dev_stats_get(evdev, &stats);
1995         if (err) {
1996                 printf("%d: failed to get stats\n", __LINE__);
1997                 return -1;
1998         }
1999
2000         if (stats.port_inflight[1] != 4) {
2001                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2002                                 __func__);
2003                 return -1;
2004         }
2005         if (stats.port_inflight[2] != 2) {
2006                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2007                                 __func__);
2008                 return -1;
2009         }
2010         if (stats.port_inflight[3] != 3) {
2011                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2012                                 __func__);
2013                 return -1;
2014         }
2015
2016         cleanup(t);
2017         return 0;
2018 }
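
/*
 * Illustrative sketch (editorial addition): enqueueing a run of NEW events
 * with explicit flow ids, as load_balancing() does above, wrapped in a
 * hypothetical helper. Flows the scheduler has already pinned stay on their
 * CQ; a previously unseen flow is steered to another mapped CQ (the test
 * above expects the least-loaded one).
 */
static inline int
example_enqueue_flows(uint8_t dev_id, uint8_t port_id, uint8_t queue_id,
                const uint32_t *flow_ids, unsigned int nb_flows,
                struct rte_mempool *mp)
{
        unsigned int i;

        for (i = 0; i < nb_flows; i++) {
                struct rte_mbuf *m = rte_gen_arp(0, mp);
                if (m == NULL)
                        return -1;
                struct rte_event ev = {
                                .op = RTE_EVENT_OP_NEW,
                                .queue_id = queue_id,
                                .flow_id = flow_ids[i],
                                .mbuf = m,
                };
                if (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
                        return -1;
        }
        return 0;
}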
2019
2020 static int
2021 load_balancing_history(struct test *t)
2022 {
2023         struct test_event_dev_stats stats = {0};
2024         const int rx_enq = 0;
2025         int err;
2026         uint32_t i;
2027
2028         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2029         if (init(t, 1, 4) < 0 ||
2030                         create_ports(t, 4) < 0 ||
2031                         create_atomic_qids(t, 1) < 0)
2032                 return -1;
2033
2034         /* CQ mapping to QID */
2035         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2036                 printf("%d: error mapping port 1 qid\n", __LINE__);
2037                 return -1;
2038         }
2039         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2040                 printf("%d: error mapping port 2 qid\n", __LINE__);
2041                 return -1;
2042         }
2043         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2044                 printf("%d: error mapping port 3 qid\n", __LINE__);
2045                 return -1;
2046         }
2047         if (rte_event_dev_start(evdev) < 0) {
2048                 printf("%d: Error with start call\n", __LINE__);
2049                 return -1;
2050         }
2051
2052         /*
2053          * Create a set of flows that test the load-balancing operation of the
2054          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2055          * the packet from CQ 0, send in a new set of flows. Ensure that:
2056          *  1. The new flow 3 gets into the empty CQ0
2057          *  2. packets for the existing flow 1 get added into CQ1
2058          *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2059          *     more outstanding pkts
2060          *
2061          *  This test makes sure that when a flow ends (i.e. all packets
2062          *  have been completed for that flow), the flow can be moved to a
2063          *  different CQ when new packets arrive (release sketch follows).
2064          */
2065         static uint32_t flows1[] = {0, 1, 1, 2};
2066
2067         for (i = 0; i < RTE_DIM(flows1); i++) {
2068                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2069                 struct rte_event ev = {
2070                                 .flow_id = flows1[i],
2071                                 .op = RTE_EVENT_OP_NEW,
2072                                 .queue_id = t->qid[0],
2073                                 .event_type = RTE_EVENT_TYPE_CPU,
2074                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2075                                 .mbuf = arp
2076                 };
2077
2078                 if (!arp) {
2079                         printf("%d: gen of pkt failed\n", __LINE__);
2080                         return -1;
2081                 }
2082                 arp->hash.rss = flows1[i];
2083                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2084                 if (err < 0) {
2085                         printf("%d: Failed to enqueue\n", __LINE__);
2086                         return -1;
2087                 }
2088         }
2089
2090         /* call the scheduler */
2091         rte_event_schedule(evdev);
2092
2093         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2094         struct rte_event ev;
2095         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2096                 printf("%d: failed to dequeue\n", __LINE__);
2097                 return -1;
2098         }
2099         if (ev.mbuf->hash.rss != flows1[0]) {
2100                 printf("%d: unexpected flow received\n", __LINE__);
2101                 return -1;
2102         }
2103
2104         /* drop the flow 0 packet from port 1 */
2105         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2106
2107         /* call the scheduler */
2108         rte_event_schedule(evdev);
2109
2110         /*
2111          * Set up the next set of flows, first a new flow to fill up
2112          * CQ 0, so that the next flow 0 packet should go to CQ2
2113          */
2114         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2115
2116         for (i = 0; i < RTE_DIM(flows2); i++) {
2117                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2118                 struct rte_event ev = {
2119                                 .flow_id = flows2[i],
2120                                 .op = RTE_EVENT_OP_NEW,
2121                                 .queue_id = t->qid[0],
2122                                 .event_type = RTE_EVENT_TYPE_CPU,
2123                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2124                                 .mbuf = arp
2125                 };
2126
2127                 if (!arp) {
2128                         printf("%d: gen of pkt failed\n", __LINE__);
2129                         return -1;
2130                 }
2131                 arp->hash.rss = flows2[i];
2132
2133                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2134                 if (err < 0) {
2135                         printf("%d: Failed to enqueue\n", __LINE__);
2136                         return -1;
2137                 }
2138         }
2139
2140         /* schedule */
2141         rte_event_schedule(evdev);
2142
2143         err = test_event_dev_stats_get(evdev, &stats);
2144         if (err) {
2145                 printf("%d: failed to get stats\n", __LINE__);
2146                 return -1;
2147         }
2148
2149         /*
2150          * Now check the resulting inflights on each port.
2151          */
2152         if (stats.port_inflight[1] != 3) {
2153                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2154                                 __func__);
2155                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2156                                 (unsigned int)stats.port_inflight[1],
2157                                 (unsigned int)stats.port_inflight[2],
2158                                 (unsigned int)stats.port_inflight[3]);
2159                 return -1;
2160         }
2161         if (stats.port_inflight[2] != 4) {
2162                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2163                                 __func__);
2164                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2165                                 (unsigned int)stats.port_inflight[1],
2166                                 (unsigned int)stats.port_inflight[2],
2167                                 (unsigned int)stats.port_inflight[3]);
2168                 return -1;
2169         }
2170         if (stats.port_inflight[3] != 2) {
2171                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2172                                 __func__);
2173                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2174                                 (unsigned int)stats.port_inflight[1],
2175                                 (unsigned int)stats.port_inflight[2],
2176                                 (unsigned int)stats.port_inflight[3]);
2177                 return -1;
2178         }
2179
2180         for (i = 1; i <= 3; i++) {
2181                 struct rte_event ev;
2182                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2183                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2184         }
2185         rte_event_schedule(evdev);
2186
2187         cleanup(t);
2188         return 0;
2189 }
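
/*
 * Illustrative sketch (editorial addition): the "finish a flow" pattern the
 * history test above relies on - dequeue an event on its worker port, hand a
 * RELEASE back in its place, then run the scheduler so the inflight count
 * (and the flow's CQ pinning) can actually be dropped. Name is hypothetical.
 */
static inline int
example_release_one(uint8_t dev_id, uint8_t port_id)
{
        struct rte_event ev;

        if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) != 1)
                return -1;
        /* done with the mbuf; tell the eventdev the CQ slot is free */
        rte_pktmbuf_free(ev.mbuf);
        ev.op = RTE_EVENT_OP_RELEASE;
        if (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
                return -1;
        /* the sw PMD only acts on the release when the scheduler runs */
        rte_event_schedule(dev_id);
        return 0;
}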
2190
2191 static int
2192 invalid_qid(struct test *t)
2193 {
2194         struct test_event_dev_stats stats;
2195         const int rx_enq = 0;
2196         int err;
2197         uint32_t i;
2198
2199         if (init(t, 1, 4) < 0 ||
2200                         create_ports(t, 4) < 0 ||
2201                         create_atomic_qids(t, 1) < 0) {
2202                 printf("%d: Error initializing device\n", __LINE__);
2203                 return -1;
2204         }
2205
2206         /* CQ mapping to QID */
2207         for (i = 0; i < 4; i++) {
2208                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2209                                 NULL, 1);
2210                 if (err != 1) {
2211                         printf("%d: error mapping port %u qid\n", __LINE__, i);
2212                         return -1;
2213                 }
2214         }
2215
2216         if (rte_event_dev_start(evdev) < 0) {
2217                 printf("%d: Error with start call\n", __LINE__);
2218                 return -1;
2219         }
2220
2221         /*
2222          * Send in a packet with an invalid qid to the scheduler.
2223          * We should see the packet enqueued OK, but the inflights for
2224          * that packet should not be incremented, and the rx_dropped
2225          * should be incremented.
2226          */
2227         static uint32_t flows1[] = {20};
2228
2229         for (i = 0; i < RTE_DIM(flows1); i++) {
2230                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2231                 if (!arp) {
2232                         printf("%d: gen of pkt failed\n", __LINE__);
2233                         return -1;
2234                 }
2235
2236                 struct rte_event ev = {
2237                                 .op = RTE_EVENT_OP_NEW,
2238                                 .queue_id = t->qid[0] + flows1[i],
2239                                 .flow_id = i,
2240                                 .mbuf = arp,
2241                 };
2242                 /* generate pkt and enqueue */
2243                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2244                 if (err < 0) {
2245                         printf("%d: Failed to enqueue\n", __LINE__);
2246                         return -1;
2247                 }
2248         }
2249
2250         /* call the scheduler */
2251         rte_event_schedule(evdev);
2252
2253         err = test_event_dev_stats_get(evdev, &stats);
2254         if (err) {
2255                 printf("%d: failed to get stats\n", __LINE__);
2256                 return -1;
2257         }
2258
2259         /*
2260          * Now check the resulting inflights on the port, and the rx_dropped.
2261          */
2262         if (stats.port_inflight[0] != 0) {
2263                 printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2264                                 __func__);
2265                 rte_event_dev_dump(evdev, stdout);
2266                 return -1;
2267         }
2268         if (stats.port_rx_dropped[0] != 1) {
2269                 printf("%d:%s: port 0 drops\n", __LINE__, __func__);
2270                 rte_event_dev_dump(evdev, stdout);
2271                 return -1;
2272         }
2273         /* each packet drop should only be counted in one place - port or dev */
2274         if (stats.rx_dropped != 0) {
2275                 printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2276                                 __func__);
2277                 rte_event_dev_dump(evdev, stdout);
2278                 return -1;
2279         }
2280
2281         cleanup(t);
2282         return 0;
2283 }
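
/*
 * Illustrative sketch (editorial addition): the accounting rule checked by
 * invalid_qid() above as a hypothetical helper - a drop caused by a bad
 * queue_id should appear only in the enqueueing port's rx_dropped counter,
 * never double-counted in the device-level rx_dropped.
 */
static inline int
example_check_drop_accounting(uint8_t dev_id, uint8_t port_id,
                uint64_t expected_drops)
{
        struct test_event_dev_stats stats;

        if (test_event_dev_stats_get(dev_id, &stats) != 0)
                return -1;
        if (stats.port_rx_dropped[port_id] != expected_drops)
                return -1;
        /* the same drop must not also show up at device level */
        if (stats.rx_dropped != 0)
                return -1;
        return 0;
}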
2284
2285 static int
2286 single_packet(struct test *t)
2287 {
2288         const uint32_t MAGIC_SEQN = 7321;
2289         struct rte_event ev;
2290         struct test_event_dev_stats stats;
2291         const int rx_enq = 0;
2292         const int wrk_enq = 2;
2293         int err;
2294
2295         /* Create instance with 4 ports */
2296         if (init(t, 1, 4) < 0 ||
2297                         create_ports(t, 4) < 0 ||
2298                         create_atomic_qids(t, 1) < 0) {
2299                 printf("%d: Error initializing device\n", __LINE__);
2300                 return -1;
2301         }
2302
2303         /* CQ mapping to QID */
2304         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2305         if (err != 1) {
2306                 printf("%d: error mapping lb qid\n", __LINE__);
2307                 cleanup(t);
2308                 return -1;
2309         }
2310
2311         if (rte_event_dev_start(evdev) < 0) {
2312                 printf("%d: Error with start call\n", __LINE__);
2313                 return -1;
2314         }
2315
2316         /************** Gen pkt and enqueue ****************/
2317         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2318         if (!arp) {
2319                 printf("%d: gen of pkt failed\n", __LINE__);
2320                 return -1;
2321         }
2322
2323         ev.op = RTE_EVENT_OP_NEW;
2324         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2325         ev.mbuf = arp;
2326         ev.queue_id = 0;
2327         ev.flow_id = 3;
2328         arp->seqn = MAGIC_SEQN;
2329
2330         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2331         if (err < 0) {
2332                 printf("%d: Failed to enqueue\n", __LINE__);
2333                 return -1;
2334         }
2335
2336         rte_event_schedule(evdev);
2337
2338         err = test_event_dev_stats_get(evdev, &stats);
2339         if (err) {
2340                 printf("%d: failed to get stats\n", __LINE__);
2341                 return -1;
2342         }
2343
2344         if (stats.rx_pkts != 1 ||
2345                         stats.tx_pkts != 1 ||
2346                         stats.port_inflight[wrk_enq] != 1) {
2347                 printf("%d: Sched core didn't handle pkt as expected\n",
2348                                 __LINE__);
2349                 rte_event_dev_dump(evdev, stdout);
2350                 return -1;
2351         }
2352
2353         uint32_t deq_pkts;
2354
2355         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2356         if (deq_pkts < 1) {
2357                 printf("%d: Failed to deq\n", __LINE__);
2358                 return -1;
2359         }
2360
2361         err = test_event_dev_stats_get(evdev, &stats);
2362         if (err) {
2363                 printf("%d: failed to get stats\n", __LINE__);
2364                 return -1;
2365         }
2366
2368         if (ev.mbuf->seqn != MAGIC_SEQN) {
2369                 printf("%d: magic sequence number not dequeued\n", __LINE__);
2370                 return -1;
2371         }
2372
2373         rte_pktmbuf_free(ev.mbuf);
2374         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2375         if (err < 0) {
2376                 printf("%d: Failed to enqueue\n", __LINE__);
2377                 return -1;
2378         }
2379         rte_event_schedule(evdev);
2380
2381         err = test_event_dev_stats_get(evdev, &stats);
2382         if (stats.port_inflight[wrk_enq] != 0) {
2383                 printf("%d: port inflight not correct\n", __LINE__);
2384                 return -1;
2385         }
2386
2387         cleanup(t);
2388         return 0;
2389 }
2390
2391 static int
2392 inflight_counts(struct test *t)
2393 {
2394         struct rte_event ev;
2395         struct test_event_dev_stats stats;
2396         const int rx_enq = 0;
2397         const int p1 = 1;
2398         const int p2 = 2;
2399         int err;
2400         int i;
2401
2402         /* Create instance with 3 ports and 2 QIDs */
2403         if (init(t, 2, 3) < 0 ||
2404                         create_ports(t, 3) < 0 ||
2405                         create_atomic_qids(t, 2) < 0) {
2406                 printf("%d: Error initializing device\n", __LINE__);
2407                 return -1;
2408         }
2409
2410         /* CQ mapping to QID */
2411         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2412         if (err != 1) {
2413                 printf("%d: error mapping lb qid\n", __LINE__);
2414                 cleanup(t);
2415                 return -1;
2416         }
2417         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2418         if (err != 1) {
2419                 printf("%d: error mapping lb qid\n", __LINE__);
2420                 cleanup(t);
2421                 return -1;
2422         }
2423
2424         if (rte_event_dev_start(evdev) < 0) {
2425                 printf("%d: Error with start call\n", __LINE__);
2426                 return -1;
2427         }
2428
2429         /************** FORWARD ****************/
2430 #define QID1_NUM 5
2431         for (i = 0; i < QID1_NUM; i++) {
2432                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2433
2434                 if (!arp) {
2435                         printf("%d: gen of pkt failed\n", __LINE__);
2436                         goto err;
2437                 }
2438
2439                 ev.queue_id =  t->qid[0];
2440                 ev.op = RTE_EVENT_OP_NEW;
2441                 ev.mbuf = arp;
2442                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2443                 if (err != 1) {
2444                         printf("%d: Failed to enqueue\n", __LINE__);
2445                         goto err;
2446                 }
2447         }
2448 #define QID2_NUM 3
2449         for (i = 0; i < QID2_NUM; i++) {
2450                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2451
2452                 if (!arp) {
2453                         printf("%d: gen of pkt failed\n", __LINE__);
2454                         goto err;
2455                 }
2456                 ev.queue_id =  t->qid[1];
2457                 ev.op = RTE_EVENT_OP_NEW;
2458                 ev.mbuf = arp;
2459                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2460                 if (err != 1) {
2461                         printf("%d: Failed to enqueue\n", __LINE__);
2462                         goto err;
2463                 }
2464         }
2465
2466         /* schedule */
2467         rte_event_schedule(evdev);
2468
2469         err = test_event_dev_stats_get(evdev, &stats);
2470         if (err) {
2471                 printf("%d: failed to get stats\n", __LINE__);
2472                 goto err;
2473         }
2474
2475         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2476                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
2477                 printf("%d: Sched core didn't handle pkt as expected\n",
2478                                 __LINE__);
2479                 goto err;
2480         }
2481
2482         if (stats.port_inflight[p1] != QID1_NUM) {
2483                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2484                                 __func__);
2485                 goto err;
2486         }
2487         if (stats.port_inflight[p2] != QID2_NUM) {
2488                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2489                                 __func__);
2490                 goto err;
2491         }
2492
2493         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2494         /* port 1 */
2495         struct rte_event events[QID1_NUM + QID2_NUM];
2496         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2497                         RTE_DIM(events), 0);
2498
2499         if (deq_pkts != QID1_NUM) {
2500                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2501                 goto err;
2502         }
2503         err = test_event_dev_stats_get(evdev, &stats);
2504         if (stats.port_inflight[p1] != QID1_NUM) {
2505                 printf("%d: port 1 inflight changed after dequeue\n",
2506                                 __LINE__);
2507                 goto err;
2508         }
2509         for (i = 0; i < QID1_NUM; i++) {
2510                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2511                                 1);
2512                 if (err != 1) {
2513                         printf("%d: %s rte enqueue of inf release failed\n",
2514                                 __LINE__, __func__);
2515                         goto err;
2516                 }
2517         }
2518
2519         /*
2520          * It is the scheduler core that decrements inflights, so it must
2521          * run again to act on the release (drop) messages just enqueued
2522          */
2523         rte_event_schedule(evdev);
2524
2525         err = test_event_dev_stats_get(evdev, &stats);
2526         if (stats.port_inflight[p1] != 0) {
2527                 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2528                 goto err;
2529         }
2530
2531         /* port2 */
2532         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2533                         RTE_DIM(events), 0);
2534         if (deq_pkts != QID2_NUM) {
2535                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2536                 goto err;
2537         }
2538         err = test_event_dev_stats_get(evdev, &stats);
2539         if (stats.port_inflight[p2] != QID2_NUM) {
2540                 printf("%d: port 2 inflight changed after dequeue\n",
2541                                 __LINE__);
2542                 goto err;
2543         }
2544         for (i = 0; i < QID2_NUM; i++) {
2545                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2546                                 1);
2547                 if (err != 1) {
2548                         printf("%d: %s rte enqueue of inf release failed\n",
2549                                 __LINE__, __func__);
2550                         goto err;
2551                 }
2552         }
2553
2554         /*
2555          * It is the scheduler core that decrements inflights, so it must
2556          * run again to act on the release (drop) messages just enqueued
2557          */
2558         rte_event_schedule(evdev);
2559
2560         err = test_event_dev_stats_get(evdev, &stats);
2561         if (stats.port_inflight[p2] != 0) {
2562                 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2563                 goto err;
2564         }
2565         cleanup(t);
2566         return 0;
2567
2568 err:
2569         rte_event_dev_dump(evdev, stdout);
2570         cleanup(t);
2571         return -1;
2572 }
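
/*
 * Illustrative sketch (editorial addition): draining a worker port and
 * releasing every event it held, which is what finally brings that port's
 * inflight count back to zero in inflight_counts() above. Name hypothetical.
 */
static inline unsigned int
example_drain_and_release(uint8_t dev_id, uint8_t port_id)
{
        struct rte_event ev;
        unsigned int drained = 0;

        /* pull events one at a time and hand back a RELEASE for each */
        while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 1) {
                rte_pktmbuf_free(ev.mbuf);
                rte_event_enqueue_burst(dev_id, port_id, &release_ev, 1);
                drained++;
        }
        /* inflight counts only drop once the scheduler processes releases */
        rte_event_schedule(dev_id);
        return drained;
}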
2573
2574 static int
2575 parallel_basic(struct test *t, int check_order)
2576 {
2577         const uint8_t rx_port = 0;
2578         const uint8_t w1_port = 1;
2579         const uint8_t w3_port = 3;
2580         const uint8_t tx_port = 4;
2581         int err;
2582         int i;
2583         uint32_t deq_pkts, j;
2584         struct rte_mbuf *mbufs[3];
2586         const uint32_t MAGIC_SEQN = 1234;
2587
2588         /* Create instance with 5 ports (rx, 3 workers, tx) and 2 QIDs */
2589         if (init(t, 2, tx_port + 1) < 0 ||
2590                         create_ports(t, tx_port + 1) < 0 ||
2591                         (check_order ?  create_ordered_qids(t, 1) :
2592                                 create_unordered_qids(t, 1)) < 0 ||
2593                         create_directed_qids(t, 1, &tx_port)) {
2594                 printf("%d: Error initializing device\n", __LINE__);
2595                 return -1;
2596         }
2597
2598         /*
2599          * CQ mapping to QID
2600          * We need three worker ports, all mapped to the same qid0 (ordered
2601          * or unordered, per check_order). Then we'll take a packet out to
2602          * each port, re-enqueue in reverse order, and make sure any required
2603          * reordering has taken place when we dequeue from the tx_port.
2604          *
2605          * Simplified test setup diagram:
2606          *
2607          * rx_port        w1_port
2608          *        \     /         \
2609          *         qid0 - w2_port - qid1
2610          *              \         /     \
2611          *                w3_port        tx_port
2612          */
2613         /* CQ mapping to QID for LB ports (directed mapped on create) */
2614         for (i = w1_port; i <= w3_port; i++) {
2615                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2616                                 1);
2617                 if (err != 1) {
2618                         printf("%d: error mapping lb qid\n", __LINE__);
2619                         cleanup(t);
2620                         return -1;
2621                 }
2622         }
2623
2624         if (rte_event_dev_start(evdev) < 0) {
2625                 printf("%d: Error with start call\n", __LINE__);
2626                 return -1;
2627         }
2628
2629         /* Enqueue 3 packets to the rx port */
2630         for (i = 0; i < 3; i++) {
2631                 struct rte_event ev;
2632                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2633                 if (!mbufs[i]) {
2634                         printf("%d: gen of pkt failed\n", __LINE__);
2635                         return -1;
2636                 }
2637
2638                 ev.queue_id = t->qid[0];
2639                 ev.op = RTE_EVENT_OP_NEW;
2640                 ev.mbuf = mbufs[i];
2641                 mbufs[i]->seqn = MAGIC_SEQN + i;
2642
2643                 /* generate pkt and enqueue */
2644                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2645                 if (err != 1) {
2646                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2647                                         __LINE__, i, err);
2648                         return -1;
2649                 }
2650         }
2651
2652         rte_event_schedule(evdev);
2653
2654         /* use extra slot to make logic in loops easier */
2655         struct rte_event deq_ev[w3_port + 1];
2656
2657         /* Dequeue the 3 packets, one from each worker port */
2658         for (i = w1_port; i <= w3_port; i++) {
2659                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2660                                 &deq_ev[i], 1, 0);
2661                 if (deq_pkts != 1) {
2662                         printf("%d: Failed to deq\n", __LINE__);
2663                         rte_event_dev_dump(evdev, stdout);
2664                         return -1;
2665                 }
2666         }
2667
2668         /* Enqueue each packet in reverse order, flushing after each one */
2669         for (i = w3_port; i >= w1_port; i--) {
2670
2671                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2672                 deq_ev[i].queue_id = t->qid[1];
2673                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2674                 if (err != 1) {
2675                         printf("%d: Failed to enqueue\n", __LINE__);
2676                         return -1;
2677                 }
2678         }
2679         rte_event_schedule(evdev);
2680
2681         /* dequeue from the tx ports, we should get 3 packets */
2682         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2683                         3, 0);
2684
2685         /* Check to see if we've got all 3 packets */
2686         if (deq_pkts != 3) {
2687                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2688                         __LINE__, deq_pkts, tx_port);
2689                 rte_event_dev_dump(evdev, stdout);
2690                 return -1;
2691         }
2692
2693         /* Check to see if the sequence numbers are in expected order */
2694         if (check_order) {
2695                 for (j = 0 ; j < deq_pkts ; j++) {
2696                         if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2697                                 printf(
2698                                         "%d: Incorrect sequence number(%d) from port %d\n",
2699                                         __LINE__, deq_ev[j].mbuf->seqn, tx_port);
2700                                 return -1;
2701                         }
2702                 }
2703         }
2704
2705         /* Destroy the instance */
2706         cleanup(t);
2707         return 0;
2708 }
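
/*
 * Illustrative sketch (editorial addition): the per-worker step from the
 * diagram in parallel_basic() above - take the event dequeued from qid0 and
 * forward it on to qid1, leaving any reordering (for the ordered case) to
 * the eventdev. Helper name is hypothetical.
 */
static inline int
example_forward_to_next_stage(uint8_t dev_id, uint8_t port_id,
                struct rte_event *ev, uint8_t next_queue)
{
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->queue_id = next_queue;
        /* for ordered queues the eventdev restores ingress order on forward */
        return rte_event_enqueue_burst(dev_id, port_id, ev, 1) == 1 ? 0 : -1;
}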
2709
2710 static int
2711 ordered_basic(struct test *t)
2712 {
2713         return parallel_basic(t, 1);
2714 }
2715
2716 static int
2717 unordered_basic(struct test *t)
2718 {
2719         return parallel_basic(t, 0);
2720 }
2721
2722 static int
2723 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2724 {
2725         const struct rte_event new_ev = {
2726                         .op = RTE_EVENT_OP_NEW
2727                         /* all other fields zero */
2728         };
2729         struct rte_event ev = new_ev;
2730         unsigned int rx_port = 0; /* port we get the first flow on */
2731         char rx_port_used_stat[64];
2732         char rx_port_free_stat[64];
2733         char other_port_used_stat[64];
2734
2735         if (init(t, 1, 2) < 0 ||
2736                         create_ports(t, 2) < 0 ||
2737                         create_atomic_qids(t, 1) < 0) {
2738                 printf("%d: Error initializing device\n", __LINE__);
2739                 return -1;
2740         }
2741         int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2742         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2743                         nb_links != 1) {
2744                 printf("%d: Error linking queue to ports\n", __LINE__);
2745                 goto err;
2746         }
2747         if (rte_event_dev_start(evdev) < 0) {
2748                 printf("%d: Error with start call\n", __LINE__);
2749                 goto err;
2750         }
2751
2752         /* send one packet and see where it goes, port 0 or 1 */
2753         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2754                 printf("%d: Error doing first enqueue\n", __LINE__);
2755                 goto err;
2756         }
2757         rte_event_schedule(evdev);
2758
2759         if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2760                         != 1)
2761                 rx_port = 1;
2762
2763         snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2764                         "port_%u_cq_ring_used", rx_port);
2765         snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2766                         "port_%u_cq_ring_free", rx_port);
2767         snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2768                         "port_%u_cq_ring_used", rx_port ^ 1);
2769         if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2770                         != 1) {
2771                 printf("%d: Error, first event not scheduled\n", __LINE__);
2772                 goto err;
2773         }
2774
2775         /* now fill up the rx port's queue with one flow to cause HOLB */
2776         do {
2777                 ev = new_ev;
2778                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2779                         printf("%d: Error with enqueue\n", __LINE__);
2780                         goto err;
2781                 }
2782                 rte_event_schedule(evdev);
2783         } while (rte_event_dev_xstats_by_name_get(evdev,
2784                                 rx_port_free_stat, NULL) != 0);
2785
2786         /* one more packet, which needs to stay in IQ - i.e. HOLB */
2787         ev = new_ev;
2788         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2789                 printf("%d: Error with enqueue\n", __LINE__);
2790                 goto err;
2791         }
2792         rte_event_schedule(evdev);
2793
2794         /* check that the other port still has an empty CQ */
2795         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2796                         != 0) {
2797                 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2798                 goto err;
2799         }
2800         /* check IQ now has one packet */
2801         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2802                         != 1) {
2803                 printf("%d: Error, QID does not have exactly 1 packet\n",
2804                         __LINE__);
2805                 goto err;
2806         }
2807
2808         /* send another flow, which should pass the other IQ entry */
2809         ev = new_ev;
2810         ev.flow_id = 1;
2811         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2812                 printf("%d: Error with enqueue\n", __LINE__);
2813                 goto err;
2814         }
2815         rte_event_schedule(evdev);
2816
2817         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2818                         != 1) {
2819                 printf("%d: Error, second flow did not pass out first\n",
2820                         __LINE__);
2821                 goto err;
2822         }
2823
2824         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2825                         != 1) {
2826                 printf("%d: Error, QID does not have exactly 1 packet\n",
2827                         __LINE__);
2828                 goto err;
2829         }
2830         cleanup(t);
2831         return 0;
2832 err:
2833         rte_event_dev_dump(evdev, stdout);
2834         cleanup(t);
2835         return -1;
2836 }
2837
2838 static int
2839 worker_loopback_worker_fn(void *arg)
2840 {
2841         struct test *t = arg;
2842         uint8_t port = t->port[1];
2843         int count = 0;
2844         int enqd;
2845
2846         /*
2847          * Takes packets from the input port and loops them back through the
2848          * eventdev. Each packet is forwarded through QIDs 0-7, 16 times,
2849          * so each packet passes through the scheduler 8 * 16 = 128 times.
2850          */
2851         printf("%d: \tWorker function started\n", __LINE__);
2852         while (count < NUM_PACKETS) {
2853 #define BURST_SIZE 32
2854                 struct rte_event ev[BURST_SIZE];
2855                 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2856                                 BURST_SIZE, 0);
2857                 if (nb_rx == 0) {
2858                         rte_pause();
2859                         continue;
2860                 }
2861
2862                 for (i = 0; i < nb_rx; i++) {
2863                         ev[i].queue_id++;
2864                         if (ev[i].queue_id != 8) {
2865                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2866                                 enqd = rte_event_enqueue_burst(evdev, port,
2867                                                 &ev[i], 1);
2868                                 if (enqd != 1) {
2869                                         printf("%d: Can't enqueue FWD!!\n",
2870                                                         __LINE__);
2871                                         return -1;
2872                                 }
2873                                 continue;
2874                         }
2875
2876                         ev[i].queue_id = 0;
2877                         ev[i].mbuf->udata64++;
2878                         if (ev[i].mbuf->udata64 != 16) {
2879                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2880                                 enqd = rte_event_enqueue_burst(evdev, port,
2881                                                 &ev[i], 1);
2882                                 if (enqd != 1) {
2883                                         printf("%d: Can't enqueue FWD!!\n",
2884                                                         __LINE__);
2885                                         return -1;
2886                                 }
2887                                 continue;
2888                         }
2889                         /* we have hit 16 iterations through system - drop */
2890                         rte_pktmbuf_free(ev[i].mbuf);
2891                         count++;
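                        /* enqueue an explicit RELEASE so the eventdev returns
                         * this event's inflight credit and atomic flow context
                         */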
2892                         ev[i].op = RTE_EVENT_OP_RELEASE;
2893                         enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2894                         if (enqd != 1) {
2895                                 printf("%d: drop enqueue failed\n", __LINE__);
2896                                 return -1;
2897                         }
2898                 }
2899         }
2900
2901         return 0;
2902 }
2903
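/*
 * Producer: allocate an mbuf per event and inject NUM_PACKETS NEW events into
 * the first QID, retrying the enqueue (with rte_pause()) until each one is
 * accepted.
 */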
2904 static int
2905 worker_loopback_producer_fn(void *arg)
2906 {
2907         struct test *t = arg;
2908         uint8_t port = t->port[0];
2909         uint64_t count = 0;
2910
2911         printf("%d: \tProducer function started\n", __LINE__);
2912         while (count < NUM_PACKETS) {
2913                 struct rte_mbuf *m = NULL;
2914                 do {
2915                         m = rte_pktmbuf_alloc(t->mbuf_pool);
2916                 } while (m == NULL);
2917
2918                 m->udata64 = 0;
2919
2920                 struct rte_event ev = {
2921                                 .op = RTE_EVENT_OP_NEW,
2922                                 .queue_id = t->qid[0],
2923                                 .flow_id = (uintptr_t)m & 0xFFFF,
2924                                 .mbuf = m,
2925                 };
2926
2927                 while (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1)
2928                         rte_pause();
2932
2933                 count++;
2934         }
2935
2936         return 0;
2937 }
2938
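/*
 * Stress test: a producer lcore injects NUM_PACKETS events and a worker lcore
 * forwards each of them through all eight atomic QIDs sixteen times, while
 * this (master) lcore runs the scheduler and watches for forward progress.
 */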
2939 static int
2940 worker_loopback(struct test *t)
2941 {
2942         /* use a single producer core and a single worker core to check the
2943          * behaviour when the worker loops packets back multiple times
2944          */
2945         struct test_event_dev_stats stats;
2946         uint64_t print_cycles = 0, cycles = 0;
2947         uint64_t tx_pkts = 0;
2948         int err;
2949         int w_lcore, p_lcore;
2950
2951         if (init(t, 8, 2) < 0 ||
2952                         create_atomic_qids(t, 8) < 0) {
2953                 printf("%d: Error initializing device\n", __LINE__);
2954                 return -1;
2955         }
2956
2957         /* RX with low max events */
2958         static struct rte_event_port_conf conf = {
2959                         .dequeue_depth = 32,
2960                         .enqueue_depth = 64,
2961         };
2962         /* beware: this cannot be set in the static initializer above, as a
2963          * static is initialized only once; it must be set again on each run
2964          */
2965         conf.new_event_threshold = 512;
2966
2967         if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2968                 printf("Error setting up RX port\n");
2969                 return -1;
2970         }
2971         t->port[0] = 0;
2972         /* TX with higher max events */
2973         conf.new_event_threshold = 4096;
2974         if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2975                 printf("Error setting up TX port\n");
2976                 return -1;
2977         }
2978         t->port[1] = 1;
2979
2980         /* CQ mapping to QID */
2981         err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2982         if (err != 8) { /* should have mapped all 8 queues */
2983                 printf("%d: error mapping TX port to all qids\n", __LINE__);
2984                 return -1;
2985         }
2986
2987         if (rte_event_dev_start(evdev) < 0) {
2988                 printf("%d: Error with start call\n", __LINE__);
2989                 return -1;
2990         }
2991
2992         p_lcore = rte_get_next_lcore(
2993                         /* start core */ -1,
2994                         /* skip master */ 1,
2995                         /* wrap */ 0);
2996         w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
2997
2998         rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
2999         rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3000
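        /* the master lcore drives the scheduler: print Rx/Tx totals roughly
         * once per second and abort if tx_pkts has not advanced for ~3
         * seconds, which is treated as a deadlock
         */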
3001         print_cycles = cycles = rte_get_timer_cycles();
3002         while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3003                         rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3004
3005                 rte_event_schedule(evdev);
3006
3007                 uint64_t new_cycles = rte_get_timer_cycles();
3008
3009                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3010                         test_event_dev_stats_get(evdev, &stats);
3011                         printf(
3012                                 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3013                                 __LINE__, stats.rx_pkts, stats.tx_pkts);
3014
3015                         print_cycles = new_cycles;
3016                 }
3017                 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3018                         test_event_dev_stats_get(evdev, &stats);
3019                         if (stats.tx_pkts == tx_pkts) {
3020                                 rte_event_dev_dump(evdev, stdout);
3021                                 printf("Dumping xstats:\n");
3022                                 xstats_print();
3023                                 printf(
3024                                         "%d: No forward progress for 3 seconds, deadlock\n",
3025                                         __LINE__);
3026                                 return -1;
3027                         }
3028                         tx_pkts = stats.tx_pkts;
3029                         cycles = new_cycles;
3030                 }
3031         }
3032         rte_event_schedule(evdev); /* ensure all completions are flushed */
3033
3034         rte_eal_mp_wait_lcore();
3035
3036         cleanup(t);
3037         return 0;
3038 }
3039
3040 static struct rte_mempool *eventdev_func_mempool;
3041
3042 static int
3043 test_sw_eventdev(void)
3044 {
3045         struct test *t = malloc(sizeof(struct test));
3046         int ret;

        if (t == NULL) {
                printf("%d: Error allocating test structure\n", __LINE__);
                return -1;
        }
3047
3048         /* manually initialize the op field; older versions of GCC complain
3049          * about static initialization of struct members that are bitfields.
3050          */
3051         release_ev.op = RTE_EVENT_OP_RELEASE;
3052
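        /* this test uses the software eventdev PMD; if the vdev has not been
         * created yet, create it here
         */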
3053         const char *eventdev_name = "event_sw0";
3054         evdev = rte_event_dev_get_dev_id(eventdev_name);
3055         if (evdev < 0) {
3056                 printf("%d: Eventdev %s not found - creating.\n",
3057                                 __LINE__, eventdev_name);
3058                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3059                         printf("Error creating eventdev\n");
3060                         return -1;
3061                 }
3062                 evdev = rte_event_dev_get_dev_id(eventdev_name);
3063                 if (evdev < 0) {
3064                         printf("Error finding newly created eventdev\n");
3065                         return -1;
3066                 }
3067         }
3068
3069         /* Only create mbuf pool once, reuse for each test run */
3070         if (!eventdev_func_mempool) {
3071                 eventdev_func_mempool = rte_pktmbuf_pool_create(
3072                                 "EVENTDEV_SW_SA_MBUF_POOL",
3073                                 (1<<12), /* 4k buffers */
3074                                 32 /*MBUF_CACHE_SIZE*/,
3075                                 0,
3076                                 512, /* use very small mbufs */
3077                                 rte_socket_id());
3078                 if (!eventdev_func_mempool) {
3079                         printf("ERROR creating mempool\n");
3080                         return -1;
3081                 }
3082         }
3083         t->mbuf_pool = eventdev_func_mempool;
3084         printf("*** Running Single Directed Packet test...\n");
3085         ret = test_single_directed_packet(t);
3086         if (ret != 0) {
3087                 printf("ERROR - Single Directed Packet test FAILED.\n");
3088                 return ret;
3089         }
3090         printf("*** Running Directed Forward Credit test...\n");
3091         ret = test_directed_forward_credits(t);
3092         if (ret != 0) {
3093                 printf("ERROR - Directed Forward Credit test FAILED.\n");
3094                 return ret;
3095         }
3096         printf("*** Running Single Load Balanced Packet test...\n");
3097         ret = single_packet(t);
3098         if (ret != 0) {
3099                 printf("ERROR - Single Load Balanced Packet test FAILED.\n");
3100                 return ret;
3101         }
3102         printf("*** Running Unordered Basic test...\n");
3103         ret = unordered_basic(t);
3104         if (ret != 0) {
3105                 printf("ERROR - Unordered Basic test FAILED.\n");
3106                 return ret;
3107         }
3108         printf("*** Running Ordered Basic test...\n");
3109         ret = ordered_basic(t);
3110         if (ret != 0) {
3111                 printf("ERROR - Ordered Basic test FAILED.\n");
3112                 return ret;
3113         }
3114         printf("*** Running Burst Packets test...\n");
3115         ret = burst_packets(t);
3116         if (ret != 0) {
3117                 printf("ERROR - Burst Packets test FAILED.\n");
3118                 return ret;
3119         }
3120         printf("*** Running Load Balancing test...\n");
3121         ret = load_balancing(t);
3122         if (ret != 0) {
3123                 printf("ERROR - Load Balancing test FAILED.\n");
3124                 return ret;
3125         }
3126         printf("*** Running Prioritized Directed test...\n");
3127         ret = test_priority_directed(t);
3128         if (ret != 0) {
3129                 printf("ERROR - Prioritized Directed test FAILED.\n");
3130                 return ret;
3131         }
3132         printf("*** Running Prioritized Atomic test...\n");
3133         ret = test_priority_atomic(t);
3134         if (ret != 0) {
3135                 printf("ERROR - Prioritized Atomic test FAILED.\n");
3136                 return ret;
3137         }
3138
3139         printf("*** Running Prioritized Ordered test...\n");
3140         ret = test_priority_ordered(t);
3141         if (ret != 0) {
3142                 printf("ERROR - Prioritized Ordered test FAILED.\n");
3143                 return ret;
3144         }
3145         printf("*** Running Prioritized Unordered test...\n");
3146         ret = test_priority_unordered(t);
3147         if (ret != 0) {
3148                 printf("ERROR - Prioritized Unordered test FAILED.\n");
3149                 return ret;
3150         }
3151         printf("*** Running Invalid QID test...\n");
3152         ret = invalid_qid(t);
3153         if (ret != 0) {
3154                 printf("ERROR - Invalid QID test FAILED.\n");
3155                 return ret;
3156         }
3157         printf("*** Running Load Balancing History test...\n");
3158         ret = load_balancing_history(t);
3159         if (ret != 0) {
3160                 printf("ERROR - Load Balancing History test FAILED.\n");
3161                 return ret;
3162         }
3163         printf("*** Running Inflight Count test...\n");
3164         ret = inflight_counts(t);
3165         if (ret != 0) {
3166                 printf("ERROR - Inflight Count test FAILED.\n");
3167                 return ret;
3168         }
3169         printf("*** Running Abuse Inflights test...\n");
3170         ret = abuse_inflights(t);
3171         if (ret != 0) {
3172                 printf("ERROR - Abuse Inflights test FAILED.\n");
3173                 return ret;
3174         }
3175         printf("*** Running XStats test...\n");
3176         ret = xstats_tests(t);
3177         if (ret != 0) {
3178                 printf("ERROR - XStats test FAILED.\n");
3179                 return ret;
3180         }
3181         printf("*** Running XStats ID Reset test...\n");
3182         ret = xstats_id_reset_tests(t);
3183         if (ret != 0) {
3184                 printf("ERROR - XStats ID Reset test FAILED.\n");
3185                 return ret;
3186         }
3187         printf("*** Running XStats Brute Force test...\n");
3188         ret = xstats_brute_force(t);
3189         if (ret != 0) {
3190                 printf("ERROR - XStats Brute Force test FAILED.\n");
3191                 return ret;
3192         }
3193         printf("*** Running XStats ID Abuse test...\n");
3194         ret = xstats_id_abuse_tests(t);
3195         if (ret != 0) {
3196                 printf("ERROR - XStats ID Abuse test FAILED.\n");
3197                 return ret;
3198         }
3199         printf("*** Running QID Priority test...\n");
3200         ret = qid_priorities(t);
3201         if (ret != 0) {
3202                 printf("ERROR - QID Priority test FAILED.\n");
3203                 return ret;
3204         }
3205         printf("*** Running Ordered Reconfigure test...\n");
3206         ret = ordered_reconfigure(t);
3207         if (ret != 0) {
3208                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3209                 return ret;
3210         }
3211         printf("*** Running Port LB Single Reconfig test...\n");
3212         ret = port_single_lb_reconfig(t);
3213         if (ret != 0) {
3214                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3215                 return ret;
3216         }
3217         printf("*** Running Port Reconfig Credits test...\n");
3218         ret = port_reconfig_credits(t);
3219         if (ret != 0) {
3220                 printf("ERROR - Port Reconfig Credits test FAILED.\n");
3221                 return ret;
3222         }
3223         printf("*** Running Head-of-line-blocking test...\n");
3224         ret = holb(t);
3225         if (ret != 0) {
3226                 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3227                 return ret;
3228         }
3229         if (rte_lcore_count() >= 3) {
3230                 printf("*** Running Worker loopback test...\n");
3231                 ret = worker_loopback(t);
3232                 if (ret != 0) {
3233                         printf("ERROR - Worker loopback test FAILED.\n");
3234                         return ret;
3235                 }
3236         } else {
3237                 printf("### Not enough cores for worker loopback test.\n");
3238                 printf("### Need at least 3 cores for test.\n");
3239         }
3240         /*
3241          * Free the test instance but keep the mempool; a pointer to it stays
3242          * in the static eventdev_func_mempool for re-use on subsequent runs.
3243          */
3244         free(t);
3245
3246         return 0;
3247 }
3248
3249 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);