1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <string.h>
36 #include <stdint.h>
37 #include <errno.h>
38 #include <unistd.h>
39 #include <sys/queue.h>
40
41 #include <rte_memory.h>
42 #include <rte_launch.h>
43 #include <rte_eal.h>
44 #include <rte_per_lcore.h>
45 #include <rte_lcore.h>
46 #include <rte_debug.h>
47 #include <rte_ethdev.h>
48 #include <rte_cycles.h>
49 #include <rte_eventdev.h>
50 #include <rte_pause.h>
51 #include <rte_service.h>
52 #include <rte_service_component.h>
53
54 #include "test.h"
55
56 #define MAX_PORTS 16
57 #define MAX_QIDS 16
58 #define NUM_PACKETS (1<<18)
59
60 static int evdev;
61
62 struct test {
63         struct rte_mempool *mbuf_pool;
64         uint8_t port[MAX_PORTS];
65         uint8_t qid[MAX_QIDS];
66         int nb_qids;
67         uint32_t service_id;
68 };
69
70 static struct rte_event release_ev;
71
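/* rte_gen_arp
 * Allocate an mbuf from the given mempool and fill it with a canned ARP
 * request frame; used as test traffic throughout this file.
 */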
72 static inline struct rte_mbuf *
73 rte_gen_arp(int portid, struct rte_mempool *mp)
74 {
75         /*
76          * len = 14 + 46
77          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
78          */
79         static const uint8_t arp_request[] = {
80                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
81                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
82                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
83                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
84                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
85                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
86                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
87                 0x00, 0x00, 0x00, 0x00
88         };
89         struct rte_mbuf *m;
90         int pkt_len = sizeof(arp_request) - 1;
91
92         m = rte_pktmbuf_alloc(mp);
93         if (!m)
94                 return 0;
95
96         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
97                 arp_request, pkt_len);
98         rte_pktmbuf_pkt_len(m) = pkt_len;
99         rte_pktmbuf_data_len(m) = pkt_len;
100
101         RTE_SET_USED(portid);
102
103         return m;
104 }
105
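/* xstats_print
 * Debug helper: dump the device, port and queue extended stats to stdout.
 */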
106 static void
107 xstats_print(void)
108 {
109         const uint32_t XSTATS_MAX = 1024;
110         uint32_t i;
111         uint32_t ids[XSTATS_MAX];
112         uint64_t values[XSTATS_MAX];
113         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
114
115         for (i = 0; i < XSTATS_MAX; i++)
116                 ids[i] = i;
117
118         /* Device names / values */
119         int ret = rte_event_dev_xstats_names_get(evdev,
120                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
121                                         xstats_names, ids, XSTATS_MAX);
122         if (ret < 0) {
123                 printf("%d: xstats names get() returned error\n",
124                         __LINE__);
125                 return;
126         }
127         ret = rte_event_dev_xstats_get(evdev,
128                                         RTE_EVENT_DEV_XSTATS_DEVICE,
129                                         0, ids, values, ret);
130         if (ret > (signed int)XSTATS_MAX)
131                 printf("%s %d: more xstats available than space\n",
132                                 __func__, __LINE__);
133         for (i = 0; (signed int)i < ret; i++) {
134                 printf("%d : %s : %"PRIu64"\n",
135                                 i, xstats_names[i].name, values[i]);
136         }
137
138         /* Port names / values */
139         ret = rte_event_dev_xstats_names_get(evdev,
140                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
141                                         xstats_names, ids, XSTATS_MAX);
142         ret = rte_event_dev_xstats_get(evdev,
143                                         RTE_EVENT_DEV_XSTATS_PORT, 1,
144                                         ids, values, ret);
145         if (ret > (signed int)XSTATS_MAX)
146                 printf("%s %d: more xstats available than space\n",
147                                 __func__, __LINE__);
148         for (i = 0; (signed int)i < ret; i++) {
149                 printf("%d : %s : %"PRIu64"\n",
150                                 i, xstats_names[i].name, values[i]);
151         }
152
153         /* Queue names / values */
154         ret = rte_event_dev_xstats_names_get(evdev,
155                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
156                                         xstats_names, ids, XSTATS_MAX);
157         ret = rte_event_dev_xstats_get(evdev,
158                                         RTE_EVENT_DEV_XSTATS_QUEUE,
159                                         1, ids, values, ret);
160         if (ret > (signed int)XSTATS_MAX)
161                 printf("%s %d: more xstats available than space\n",
162                                 __func__, __LINE__);
163         for (i = 0; (signed int)i < ret; i++) {
164                 printf("%d : %s : %"PRIu64"\n",
165                                 i, xstats_names[i].name, values[i]);
166         }
167 }
168
169 /* initialization and config */
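/* init
 * Configure the event device with the requested number of queues and ports
 * and default flow/depth limits, preserving the test's mbuf pool pointer
 * across the memset of the test structure.
 */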
170 static inline int
171 init(struct test *t, int nb_queues, int nb_ports)
172 {
173         struct rte_event_dev_config config = {
174                         .nb_event_queues = nb_queues,
175                         .nb_event_ports = nb_ports,
176                         .nb_event_queue_flows = 1024,
177                         .nb_events_limit = 4096,
178                         .nb_event_port_dequeue_depth = 128,
179                         .nb_event_port_enqueue_depth = 128,
180         };
181         int ret;
182
183         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
184
185         memset(t, 0, sizeof(*t));
186         t->mbuf_pool = temp;
187
188         ret = rte_event_dev_configure(evdev, &config);
189         if (ret < 0)
190                 printf("%d: Error configuring device\n", __LINE__);
191         return ret;
192 }
193
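/* create_ports
 * Set up num_ports event ports with a default configuration and record
 * their ids in the test structure.
 */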
194 static inline int
195 create_ports(struct test *t, int num_ports)
196 {
197         int i;
198         static const struct rte_event_port_conf conf = {
199                         .new_event_threshold = 1024,
200                         .dequeue_depth = 32,
201                         .enqueue_depth = 64,
202         };
203         if (num_ports > MAX_PORTS)
204                 return -1;
205
206         for (i = 0; i < num_ports; i++) {
207                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
208                         printf("Error setting up port %d\n", i);
209                         return -1;
210                 }
211                 t->port[i] = i;
212         }
213
214         return 0;
215 }
216
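/* create_lb_qids
 * Create num_qids load-balanced queues of the given schedule type, starting
 * at the next free qid index in the test structure.
 */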
217 static inline int
218 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
219 {
220         int i;
221
222         /* Q creation */
223         const struct rte_event_queue_conf conf = {
224                         .schedule_type = flags,
225                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
226                         .nb_atomic_flows = 1024,
227                         .nb_atomic_order_sequences = 1024,
228         };
229
230         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
231                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
232                         printf("%d: error creating qid %d\n", __LINE__, i);
233                         return -1;
234                 }
235                 t->qid[i] = i;
236         }
237         t->nb_qids += num_qids;
238         if (t->nb_qids > MAX_QIDS)
239                 return -1;
240
241         return 0;
242 }
243
244 static inline int
245 create_atomic_qids(struct test *t, int num_qids)
246 {
247         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
248 }
249
250 static inline int
251 create_ordered_qids(struct test *t, int num_qids)
252 {
253         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
254 }
255
256
257 static inline int
258 create_unordered_qids(struct test *t, int num_qids)
259 {
260         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
261 }
262
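/* create_directed_qids
 * Create num_qids single-link (directed) queues and link each one to the
 * corresponding entry of the ports[] array.
 */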
263 static inline int
264 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
265 {
266         int i;
267
268         /* Q creation */
269         static const struct rte_event_queue_conf conf = {
270                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
271                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
272         };
273
274         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
275                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
276                         printf("%d: error creating qid %d\n", __LINE__, i);
277                         return -1;
278                 }
279                 t->qid[i] = i;
280
281                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
282                                 &t->qid[i], NULL, 1) != 1) {
283                         printf("%d: error creating link for qid %d\n",
284                                         __LINE__, i);
285                         return -1;
286                 }
287         }
288         t->nb_qids += num_qids;
289         if (t->nb_qids > MAX_QIDS)
290                 return -1;
291
292         return 0;
293 }
294
295 /* destruction */
296 static inline int
297 cleanup(struct test *t __rte_unused)
298 {
299         rte_event_dev_stop(evdev);
300         rte_event_dev_close(evdev);
301         return 0;
302 }
303
304 struct test_event_dev_stats {
305         uint64_t rx_pkts;       /**< Total packets received */
306         uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
307         uint64_t tx_pkts;       /**< Total packets transmitted */
308
309         /** Packets received on this port */
310         uint64_t port_rx_pkts[MAX_PORTS];
311         /** Packets dropped on this port */
312         uint64_t port_rx_dropped[MAX_PORTS];
313         /** Packets inflight on this port */
314         uint64_t port_inflight[MAX_PORTS];
315         /** Packets transmitted on this port */
316         uint64_t port_tx_pkts[MAX_PORTS];
317         /** Packets received on this qid */
318         uint64_t qid_rx_pkts[MAX_QIDS];
319         /** Packets dropped on this qid */
320         uint64_t qid_rx_dropped[MAX_QIDS];
321         /** Packets transmitted on this qid */
322         uint64_t qid_tx_pkts[MAX_QIDS];
323 };
324
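/* test_event_dev_stats_get
 * Fill a test_event_dev_stats snapshot by looking up the driver's xstats by
 * name: dev_rx/dev_drop/dev_tx plus the per-port and per-qid counters.
 */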
325 static inline int
326 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
327 {
328         static uint32_t i;
329         static uint32_t total_ids[3]; /* rx, tx and drop */
330         static uint32_t port_rx_pkts_ids[MAX_PORTS];
331         static uint32_t port_rx_dropped_ids[MAX_PORTS];
332         static uint32_t port_inflight_ids[MAX_PORTS];
333         static uint32_t port_tx_pkts_ids[MAX_PORTS];
334         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
335         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
336         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
337
338
339         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
340                         "dev_rx", &total_ids[0]);
341         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
342                         "dev_drop", &total_ids[1]);
343         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
344                         "dev_tx", &total_ids[2]);
345         for (i = 0; i < MAX_PORTS; i++) {
346                 char name[32];
347                 snprintf(name, sizeof(name), "port_%u_rx", i);
348                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
349                                 dev_id, name, &port_rx_pkts_ids[i]);
350                 snprintf(name, sizeof(name), "port_%u_drop", i);
351                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
352                                 dev_id, name, &port_rx_dropped_ids[i]);
353                 snprintf(name, sizeof(name), "port_%u_inflight", i);
354                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
355                                 dev_id, name, &port_inflight_ids[i]);
356                 snprintf(name, sizeof(name), "port_%u_tx", i);
357                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
358                                 dev_id, name, &port_tx_pkts_ids[i]);
359         }
360         for (i = 0; i < MAX_QIDS; i++) {
361                 char name[32];
362                 snprintf(name, sizeof(name), "qid_%u_rx", i);
363                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
364                                 dev_id, name, &qid_rx_pkts_ids[i]);
365                 snprintf(name, sizeof(name), "qid_%u_drop", i);
366                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
367                                 dev_id, name, &qid_rx_dropped_ids[i]);
368                 snprintf(name, sizeof(name), "qid_%u_tx", i);
369                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
370                                 dev_id, name, &qid_tx_pkts_ids[i]);
371         }
372
373         return 0;
374 }
375
376 /* run_prio_packet_test
377  * This performs a basic packet priority check on the test instance passed in.
378  * It is factored out of the main priority tests as the same tests must be
379  * performed to ensure prioritization of each type of QID.
380  *
381  * Requirements:
382  *  - An initialized test structure, including mempool
383  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
384  *  - t->qid[0] is the QID to be tested
385  *  - if LB QID, the CQ must be mapped to the QID.
386  */
387 static int
388 run_prio_packet_test(struct test *t)
389 {
390         int err;
391         const uint32_t MAGIC_SEQN[] = {4711, 1234};
392         const uint32_t PRIORITY[] = {
393                 RTE_EVENT_DEV_PRIORITY_NORMAL,
394                 RTE_EVENT_DEV_PRIORITY_HIGHEST
395         };
396         unsigned int i;
397         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
398                 /* generate pkt and enqueue */
399                 struct rte_event ev;
400                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
401                 if (!arp) {
402                         printf("%d: gen of pkt failed\n", __LINE__);
403                         return -1;
404                 }
405                 arp->seqn = MAGIC_SEQN[i];
406
407                 ev = (struct rte_event){
408                         .priority = PRIORITY[i],
409                         .op = RTE_EVENT_OP_NEW,
410                         .queue_id = t->qid[0],
411                         .mbuf = arp
412                 };
413                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
414                 if (err < 0) {
415                         printf("%d: error failed to enqueue\n", __LINE__);
416                         return -1;
417                 }
418         }
419
420         rte_service_run_iter_on_app_lcore(t->service_id);
421
422         struct test_event_dev_stats stats;
423         err = test_event_dev_stats_get(evdev, &stats);
424         if (err) {
425                 printf("%d: error failed to get stats\n", __LINE__);
426                 return -1;
427         }
428
429         if (stats.port_rx_pkts[t->port[0]] != 2) {
430                 printf("%d: error stats incorrect for directed port\n",
431                                 __LINE__);
432                 rte_event_dev_dump(evdev, stdout);
433                 return -1;
434         }
435
436         struct rte_event ev, ev2;
437         uint32_t deq_pkts;
438         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
439         if (deq_pkts != 1) {
440                 printf("%d: error failed to deq\n", __LINE__);
441                 rte_event_dev_dump(evdev, stdout);
442                 return -1;
443         }
444         if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
445                 printf("%d: first packet out not highest priority\n",
446                                 __LINE__);
447                 rte_event_dev_dump(evdev, stdout);
448                 return -1;
449         }
450         rte_pktmbuf_free(ev.mbuf);
451
452         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
453         if (deq_pkts != 1) {
454                 printf("%d: error failed to deq\n", __LINE__);
455                 rte_event_dev_dump(evdev, stdout);
456                 return -1;
457         }
458         if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
459                 printf("%d: second packet out not lower priority\n",
460                                 __LINE__);
461                 rte_event_dev_dump(evdev, stdout);
462                 return -1;
463         }
464         rte_pktmbuf_free(ev2.mbuf);
465
466         cleanup(t);
467         return 0;
468 }
469
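/* test_single_directed_packet
 * Enqueue a single packet to a directed queue, run the scheduler service,
 * and verify it is dequeued on the linked port with its sequence number
 * intact.
 */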
470 static int
471 test_single_directed_packet(struct test *t)
472 {
473         const int rx_enq = 0;
474         const int wrk_enq = 2;
475         int err;
476
477         /* Create instance with 3 directed QIDs going to 3 ports */
478         if (init(t, 3, 3) < 0 ||
479                         create_ports(t, 3) < 0 ||
480                         create_directed_qids(t, 3, t->port) < 0)
481                 return -1;
482
483         if (rte_event_dev_start(evdev) < 0) {
484                 printf("%d: Error with start call\n", __LINE__);
485                 return -1;
486         }
487
488         /************** FORWARD ****************/
489         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
490         struct rte_event ev = {
491                         .op = RTE_EVENT_OP_NEW,
492                         .queue_id = wrk_enq,
493                         .mbuf = arp,
494         };
495
496         if (!arp) {
497                 printf("%d: gen of pkt failed\n", __LINE__);
498                 return -1;
499         }
500
501         const uint32_t MAGIC_SEQN = 4711;
502         arp->seqn = MAGIC_SEQN;
503
504         /* generate pkt and enqueue */
505         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
506         if (err < 0) {
507                 printf("%d: error failed to enqueue\n", __LINE__);
508                 return -1;
509         }
510
511         /* Run the scheduler service, as directed packets may need to be re-ordered */
512         rte_service_run_iter_on_app_lcore(t->service_id);
513
514         struct test_event_dev_stats stats;
515         err = test_event_dev_stats_get(evdev, &stats);
516         if (err) {
517                 printf("%d: error failed to get stats\n", __LINE__);
518                 return -1;
519         }
520
521         if (stats.port_rx_pkts[rx_enq] != 1) {
522                 printf("%d: error stats incorrect for directed port\n",
523                                 __LINE__);
524                 return -1;
525         }
526
527         uint32_t deq_pkts;
528         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
529         if (deq_pkts != 1) {
530                 printf("%d: error failed to deq\n", __LINE__);
531                 return -1;
532         }
533
534         err = test_event_dev_stats_get(evdev, &stats);
535         if (stats.port_rx_pkts[wrk_enq] != 0 &&
536                         stats.port_rx_pkts[wrk_enq] != 1) {
537                 printf("%d: error directed stats post-dequeue\n", __LINE__);
538                 return -1;
539         }
540
541         if (ev.mbuf->seqn != MAGIC_SEQN) {
542                 printf("%d: error magic sequence number not dequeued\n",
543                                 __LINE__);
544                 return -1;
545         }
546
547         rte_pktmbuf_free(ev.mbuf);
548         cleanup(t);
549         return 0;
550 }
551
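/* test_directed_forward_credits
 * Loop a single event through a directed queue, re-enqueuing it as a
 * FORWARD each iteration, to check that port credits are returned and
 * enqueueing never fails.
 */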
552 static int
553 test_directed_forward_credits(struct test *t)
554 {
555         uint32_t i;
556         int32_t err;
557
558         if (init(t, 1, 1) < 0 ||
559                         create_ports(t, 1) < 0 ||
560                         create_directed_qids(t, 1, t->port) < 0)
561                 return -1;
562
563         if (rte_event_dev_start(evdev) < 0) {
564                 printf("%d: Error with start call\n", __LINE__);
565                 return -1;
566         }
567
568         struct rte_event ev = {
569                         .op = RTE_EVENT_OP_NEW,
570                         .queue_id = 0,
571         };
572
573         for (i = 0; i < 1000; i++) {
574                 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
575                 if (err < 0) {
576                         printf("%d: error failed to enqueue\n", __LINE__);
577                         return -1;
578                 }
579                 rte_service_run_iter_on_app_lcore(t->service_id);
580
581                 uint32_t deq_pkts;
582                 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
583                 if (deq_pkts != 1) {
584                         printf("%d: error failed to deq\n", __LINE__);
585                         return -1;
586                 }
587
588                 /* re-write event to be a forward, and continue looping it */
589                 ev.op = RTE_EVENT_OP_FORWARD;
590         }
591
592         cleanup(t);
593         return 0;
594 }
595
596
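/* The following tests exercise run_prio_packet_test() against each queue
 * type: directed, atomic, ordered and parallel (unordered).
 */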
597 static int
598 test_priority_directed(struct test *t)
599 {
600         if (init(t, 1, 1) < 0 ||
601                         create_ports(t, 1) < 0 ||
602                         create_directed_qids(t, 1, t->port) < 0) {
603                 printf("%d: Error initializing device\n", __LINE__);
604                 return -1;
605         }
606
607         if (rte_event_dev_start(evdev) < 0) {
608                 printf("%d: Error with start call\n", __LINE__);
609                 return -1;
610         }
611
612         return run_prio_packet_test(t);
613 }
614
615 static int
616 test_priority_atomic(struct test *t)
617 {
618         if (init(t, 1, 1) < 0 ||
619                         create_ports(t, 1) < 0 ||
620                         create_atomic_qids(t, 1) < 0) {
621                 printf("%d: Error initializing device\n", __LINE__);
622                 return -1;
623         }
624
625         /* map the QID */
626         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
627                 printf("%d: error mapping qid to port\n", __LINE__);
628                 return -1;
629         }
630         if (rte_event_dev_start(evdev) < 0) {
631                 printf("%d: Error with start call\n", __LINE__);
632                 return -1;
633         }
634
635         return run_prio_packet_test(t);
636 }
637
638 static int
639 test_priority_ordered(struct test *t)
640 {
641         if (init(t, 1, 1) < 0 ||
642                         create_ports(t, 1) < 0 ||
643                         create_ordered_qids(t, 1) < 0) {
644                 printf("%d: Error initializing device\n", __LINE__);
645                 return -1;
646         }
647
648         /* map the QID */
649         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
650                 printf("%d: error mapping qid to port\n", __LINE__);
651                 return -1;
652         }
653         if (rte_event_dev_start(evdev) < 0) {
654                 printf("%d: Error with start call\n", __LINE__);
655                 return -1;
656         }
657
658         return run_prio_packet_test(t);
659 }
660
661 static int
662 test_priority_unordered(struct test *t)
663 {
664         if (init(t, 1, 1) < 0 ||
665                         create_ports(t, 1) < 0 ||
666                         create_unordered_qids(t, 1) < 0) {
667                 printf("%d: Error initializing device\n", __LINE__);
668                 return -1;
669         }
670
671         /* map the QID */
672         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
673                 printf("%d: error mapping qid to port\n", __LINE__);
674                 return -1;
675         }
676         if (rte_event_dev_start(evdev) < 0) {
677                 printf("%d: Error with start call\n", __LINE__);
678                 return -1;
679         }
680
681         return run_prio_packet_test(t);
682 }
683
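/* burst_packets
 * Enqueue packets alternating across two atomic queues and verify that the
 * scheduler delivers half of them to each of the two linked ports.
 */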
684 static int
685 burst_packets(struct test *t)
686 {
687         /************** CONFIG ****************/
688         uint32_t i;
689         int err;
690         int ret;
691
692         /* Create instance with 2 ports and 2 queues */
693         if (init(t, 2, 2) < 0 ||
694                         create_ports(t, 2) < 0 ||
695                         create_atomic_qids(t, 2) < 0) {
696                 printf("%d: Error initializing device\n", __LINE__);
697                 return -1;
698         }
699
700         /* CQ mapping to QID */
701         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
702         if (ret != 1) {
703                 printf("%d: error mapping lb qid0\n", __LINE__);
704                 return -1;
705         }
706         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
707         if (ret != 1) {
708                 printf("%d: error mapping lb qid1\n", __LINE__);
709                 return -1;
710         }
711
712         if (rte_event_dev_start(evdev) < 0) {
713                 printf("%d: Error with start call\n", __LINE__);
714                 return -1;
715         }
716
717         /************** FORWARD ****************/
718         const uint32_t rx_port = 0;
719         const uint32_t NUM_PKTS = 2;
720
721         for (i = 0; i < NUM_PKTS; i++) {
722                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
723                 if (!arp) {
724                         printf("%d: error generating pkt\n", __LINE__);
725                         return -1;
726                 }
727
728                 struct rte_event ev = {
729                                 .op = RTE_EVENT_OP_NEW,
730                                 .queue_id = i % 2,
731                                 .flow_id = i % 3,
732                                 .mbuf = arp,
733                 };
734                 /* generate pkt and enqueue */
735                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
736                 if (err < 0) {
737                         printf("%d: Failed to enqueue\n", __LINE__);
738                         return -1;
739                 }
740         }
741         rte_service_run_iter_on_app_lcore(t->service_id);
742
743         /* Check stats to verify that all NUM_PKTS arrived at the sched core */
744         struct test_event_dev_stats stats;
745
746         err = test_event_dev_stats_get(evdev, &stats);
747         if (err) {
748                 printf("%d: failed to get stats\n", __LINE__);
749                 return -1;
750         }
751         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
752                 printf("%d: Sched core didn't receive all %d pkts\n",
753                                 __LINE__, NUM_PKTS);
754                 rte_event_dev_dump(evdev, stdout);
755                 return -1;
756         }
757
758         uint32_t deq_pkts;
759         int p;
760
761         deq_pkts = 0;
762         /******** DEQ QID 1 *******/
763         do {
764                 struct rte_event ev;
765                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
766                 deq_pkts += p;
767                 rte_pktmbuf_free(ev.mbuf);
768         } while (p);
769
770         if (deq_pkts != NUM_PKTS/2) {
771                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
772                                 __LINE__);
773                 return -1;
774         }
775
776         /******** DEQ QID 2 *******/
777         deq_pkts = 0;
778         do {
779                 struct rte_event ev;
780                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
781                 deq_pkts += p;
782                 rte_pktmbuf_free(ev.mbuf);
783         } while (p);
784         if (deq_pkts != NUM_PKTS/2) {
785                 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
786                                 __LINE__);
787                 return -1;
788         }
789
790         cleanup(t);
791         return 0;
792 }
793
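/* abuse_inflights
 * Enqueue the bare release_ev event without any prior dequeue and verify
 * the scheduler copes: no rx/tx packets counted and no inflights left on
 * the worker port.
 */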
794 static int
795 abuse_inflights(struct test *t)
796 {
797         const int rx_enq = 0;
798         const int wrk_enq = 2;
799         int err;
800
801         /* Create instance with 4 ports */
802         if (init(t, 1, 4) < 0 ||
803                         create_ports(t, 4) < 0 ||
804                         create_atomic_qids(t, 1) < 0) {
805                 printf("%d: Error initializing device\n", __LINE__);
806                 return -1;
807         }
808
809         /* CQ mapping to QID */
810         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
811         if (err != 1) {
812                 printf("%d: error mapping lb qid\n", __LINE__);
813                 cleanup(t);
814                 return -1;
815         }
816
817         if (rte_event_dev_start(evdev) < 0) {
818                 printf("%d: Error with start call\n", __LINE__);
819                 return -1;
820         }
821
822         /* Enqueue op only */
823         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
824         if (err < 0) {
825                 printf("%d: Failed to enqueue\n", __LINE__);
826                 return -1;
827         }
828
829         /* schedule */
830         rte_service_run_iter_on_app_lcore(t->service_id);
831
832         struct test_event_dev_stats stats;
833
834         err = test_event_dev_stats_get(evdev, &stats);
835         if (err) {
836                 printf("%d: failed to get stats\n", __LINE__);
837                 return -1;
838         }
839
840         if (stats.rx_pkts != 0 ||
841                         stats.tx_pkts != 0 ||
842                         stats.port_inflight[wrk_enq] != 0) {
843                 printf("%d: Sched core didn't handle pkt as expected\n",
844                                 __LINE__);
845                 return -1;
846         }
847
848         cleanup(t);
849         return 0;
850 }
851
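/* xstats_tests
 * Check the number of device, port and queue xstats reported, verify their
 * values after enqueuing a few packets, and check that resets zero the
 * resettable counters while leaving the others untouched.
 */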
852 static int
853 xstats_tests(struct test *t)
854 {
855         const int wrk_enq = 2;
856         int err;
857
858         /* Create instance with 4 ports */
859         if (init(t, 1, 4) < 0 ||
860                         create_ports(t, 4) < 0 ||
861                         create_atomic_qids(t, 1) < 0) {
862                 printf("%d: Error initializing device\n", __LINE__);
863                 return -1;
864         }
865
866         /* CQ mapping to QID */
867         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
868         if (err != 1) {
869                 printf("%d: error mapping lb qid\n", __LINE__);
870                 cleanup(t);
871                 return -1;
872         }
873
874         if (rte_event_dev_start(evdev) < 0) {
875                 printf("%d: Error with start call\n", __LINE__);
876                 return -1;
877         }
878
879         const uint32_t XSTATS_MAX = 1024;
880
881         uint32_t i;
882         uint32_t ids[XSTATS_MAX];
883         uint64_t values[XSTATS_MAX];
884         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
885
886         for (i = 0; i < XSTATS_MAX; i++)
887                 ids[i] = i;
888
889         /* Device names / values */
890         int ret = rte_event_dev_xstats_names_get(evdev,
891                                         RTE_EVENT_DEV_XSTATS_DEVICE,
892                                         0, xstats_names, ids, XSTATS_MAX);
893         if (ret != 6) {
894                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
895                 return -1;
896         }
897         ret = rte_event_dev_xstats_get(evdev,
898                                         RTE_EVENT_DEV_XSTATS_DEVICE,
899                                         0, ids, values, ret);
900         if (ret != 6) {
901                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
902                 return -1;
903         }
904
905         /* Port names / values */
906         ret = rte_event_dev_xstats_names_get(evdev,
907                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
908                                         xstats_names, ids, XSTATS_MAX);
909         if (ret != 21) {
910                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
911                 return -1;
912         }
913         ret = rte_event_dev_xstats_get(evdev,
914                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
915                                         ids, values, ret);
916         if (ret != 21) {
917                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
918                 return -1;
919         }
920
921         /* Queue names / values */
922         ret = rte_event_dev_xstats_names_get(evdev,
923                                         RTE_EVENT_DEV_XSTATS_QUEUE,
924                                         0, xstats_names, ids, XSTATS_MAX);
925         if (ret != 17) {
926                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
927                 return -1;
928         }
929
930         /* NEGATIVE TEST: with an invalid queue id, -EINVAL should be returned */
931         ret = rte_event_dev_xstats_get(evdev,
932                                         RTE_EVENT_DEV_XSTATS_QUEUE,
933                                         1, ids, values, ret);
934         if (ret != -EINVAL) {
935                 printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
936                 return -1;
937         }
938
939         ret = rte_event_dev_xstats_get(evdev,
940                                         RTE_EVENT_DEV_XSTATS_QUEUE,
941                                         0, ids, values, ret);
942         if (ret != 17) {
943                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
944                 return -1;
945         }
946
947         /* enqueue packets to check values */
948         for (i = 0; i < 3; i++) {
949                 struct rte_event ev;
950                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
951                 if (!arp) {
952                         printf("%d: gen of pkt failed\n", __LINE__);
953                         return -1;
954                 }
955                 ev.queue_id = t->qid[i];
956                 ev.op = RTE_EVENT_OP_NEW;
957                 ev.mbuf = arp;
958                 ev.flow_id = 7;
959                 arp->seqn = i;
960
961                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
962                 if (err != 1) {
963                         printf("%d: Failed to enqueue\n", __LINE__);
964                         return -1;
965                 }
966         }
967
968         rte_service_run_iter_on_app_lcore(t->service_id);
969
970         /* Device names / values */
971         int num_stats = rte_event_dev_xstats_names_get(evdev,
972                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
973                                         xstats_names, ids, XSTATS_MAX);
974         if (num_stats < 0)
975                 goto fail;
976         ret = rte_event_dev_xstats_get(evdev,
977                                         RTE_EVENT_DEV_XSTATS_DEVICE,
978                                         0, ids, values, num_stats);
979         static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
980         for (i = 0; (signed int)i < ret; i++) {
981                 if (expected[i] != values[i]) {
982                         printf(
983                                 "%d Error xstat %d (id %d) %s : %"PRIu64
984                                 ", expect %"PRIu64"\n",
985                                 __LINE__, i, ids[i], xstats_names[i].name,
986                                 values[i], expected[i]);
987                         goto fail;
988                 }
989         }
990
991         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
992                                         0, NULL, 0);
993
994         /* ensure reset statistics are zeroed */
995         static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
996         ret = rte_event_dev_xstats_get(evdev,
997                                         RTE_EVENT_DEV_XSTATS_DEVICE,
998                                         0, ids, values, num_stats);
999         for (i = 0; (signed int)i < ret; i++) {
1000                 if (expected_zero[i] != values[i]) {
1001                         printf(
1002                                 "%d Error, xstat %d (id %d) %s : %"PRIu64
1003                                 ", expect %"PRIu64"\n",
1004                                 __LINE__, i, ids[i], xstats_names[i].name,
1005                                 values[i], expected_zero[i]);
1006                         goto fail;
1007                 }
1008         }
1009
1010         /* port reset checks */
1011         num_stats = rte_event_dev_xstats_names_get(evdev,
1012                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
1013                                         xstats_names, ids, XSTATS_MAX);
1014         if (num_stats < 0)
1015                 goto fail;
1016         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1017                                         0, ids, values, num_stats);
1018
1019         static const uint64_t port_expected[] = {
1020                 3 /* rx */,
1021                 0 /* tx */,
1022                 0 /* drop */,
1023                 0 /* inflights */,
1024                 0 /* avg pkt cycles */,
1025                 29 /* credits */,
1026                 0 /* rx ring used */,
1027                 4096 /* rx ring free */,
1028                 0 /* cq ring used */,
1029                 32 /* cq ring free */,
1030                 0 /* dequeue calls */,
1031                 /* 10 dequeue burst buckets */
1032                 0, 0, 0, 0, 0,
1033                 0, 0, 0, 0, 0,
1034         };
1035         if (ret != RTE_DIM(port_expected)) {
1036                 printf(
1037                         "%s %d: wrong number of port stats (%d), expected %zu\n",
1038                         __func__, __LINE__, ret, RTE_DIM(port_expected));
1039         }
1040
1041         for (i = 0; (signed int)i < ret; i++) {
1042                 if (port_expected[i] != values[i]) {
1043                         printf(
1044                                 "%s : %d: Error stat %s is %"PRIu64
1045                                 ", expected %"PRIu64"\n",
1046                                 __func__, __LINE__, xstats_names[i].name,
1047                                 values[i], port_expected[i]);
1048                         goto fail;
1049                 }
1050         }
1051
1052         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1053                                         0, NULL, 0);
1054
1055         /* ensure reset statistics are zeroed */
1056         static const uint64_t port_expected_zero[] = {
1057                 0 /* rx */,
1058                 0 /* tx */,
1059                 0 /* drop */,
1060                 0 /* inflights */,
1061                 0 /* avg pkt cycles */,
1062                 29 /* credits */,
1063                 0 /* rx ring used */,
1064                 4096 /* rx ring free */,
1065                 0 /* cq ring used */,
1066                 32 /* cq ring free */,
1067                 0 /* dequeue calls */,
1068                 /* 10 dequeue burst buckets */
1069                 0, 0, 0, 0, 0,
1070                 0, 0, 0, 0, 0,
1071         };
1072         ret = rte_event_dev_xstats_get(evdev,
1073                                         RTE_EVENT_DEV_XSTATS_PORT,
1074                                         0, ids, values, num_stats);
1075         for (i = 0; (signed int)i < ret; i++) {
1076                 if (port_expected_zero[i] != values[i]) {
1077                         printf(
1078                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1079                                 ", expect %"PRIu64"\n",
1080                                 __LINE__, i, ids[i], xstats_names[i].name,
1081                                 values[i], port_expected_zero[i]);
1082                         goto fail;
1083                 }
1084         }
1085
1086         /* QUEUE STATS TESTS */
1087         num_stats = rte_event_dev_xstats_names_get(evdev,
1088                                                 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1089                                                 xstats_names, ids, XSTATS_MAX);
1090         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1091                                         0, ids, values, num_stats);
1092         if (ret < 0) {
1093                 printf("xstats get returned %d\n", ret);
1094                 goto fail;
1095         }
1096         if ((unsigned int)ret > XSTATS_MAX)
1097                 printf("%s %d: more xstats available than space\n",
1098                                 __func__, __LINE__);
1099
1100         static const uint64_t queue_expected[] = {
1101                 3 /* rx */,
1102                 3 /* tx */,
1103                 0 /* drop */,
1104                 3 /* inflights */,
1105                 512 /* iq size */,
1106                 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1107                 /* QID-to-Port: pinned_flows, packets */
1108                 0, 0,
1109                 0, 0,
1110                 1, 3,
1111                 0, 0,
1112         };
1113         for (i = 0; (signed int)i < ret; i++) {
1114                 if (queue_expected[i] != values[i]) {
1115                         printf(
1116                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1117                                 ", expect %"PRIu64"\n",
1118                                 __LINE__, i, ids[i], xstats_names[i].name,
1119                                 values[i], queue_expected[i]);
1120                         goto fail;
1121                 }
1122         }
1123
1124         /* Reset the queue stats here */
1125         ret = rte_event_dev_xstats_reset(evdev,
1126                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1127                                         NULL,
1128                                         0);
1129
1130         /* Verify that the resettable stats are reset, and others are not */
1131         static const uint64_t queue_expected_zero[] = {
1132                 0 /* rx */,
1133                 0 /* tx */,
1134                 0 /* drop */,
1135                 3 /* inflight */,
1136                 512 /* iq size */,
1137                 0, 0, 0, 0, /* 4 iq used */
1138                 /* QID-to-Port: pinned_flows, packets */
1139                 0, 0,
1140                 0, 0,
1141                 1, 0,
1142                 0, 0,
1143         };
1144
1145         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1146                                         ids, values, num_stats);
1147         int fails = 0;
1148         for (i = 0; (signed int)i < ret; i++) {
1149                 if (queue_expected_zero[i] != values[i]) {
1150                         printf(
1151                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1152                                 ", expect %"PRIu64"\n",
1153                                 __LINE__, i, ids[i], xstats_names[i].name,
1154                                 values[i], queue_expected_zero[i]);
1155                         fails++;
1156                 }
1157         }
1158         if (fails) {
1159                 printf("%d : %d of values were not as expected above\n",
1160                                 __LINE__, fails);
1161                 goto fail;
1162         }
1163
1164         cleanup(t);
1165         return 0;
1166
1167 fail:
1168         rte_event_dev_dump(0, stdout);
1169         cleanup(t);
1170         return -1;
1171 }
1172
1173
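/* xstats_id_abuse_tests
 * Pass out-of-range port and queue numbers to the xstats names_get calls
 * and check that zero stats are returned.
 */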
1174 static int
1175 xstats_id_abuse_tests(struct test *t)
1176 {
1177         int err;
1178         const uint32_t XSTATS_MAX = 1024;
1179         const uint32_t link_port = 2;
1180
1181         uint32_t ids[XSTATS_MAX];
1182         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1183
1184         /* Create instance with 4 ports */
1185         if (init(t, 1, 4) < 0 ||
1186                         create_ports(t, 4) < 0 ||
1187                         create_atomic_qids(t, 1) < 0) {
1188                 printf("%d: Error initializing device\n", __LINE__);
1189                 goto fail;
1190         }
1191
1192         err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1193         if (err != 1) {
1194                 printf("%d: error mapping lb qid\n", __LINE__);
1195                 goto fail;
1196         }
1197
1198         if (rte_event_dev_start(evdev) < 0) {
1199                 printf("%d: Error with start call\n", __LINE__);
1200                 goto fail;
1201         }
1202
1203         /* no test for device, as it ignores the port/q number */
1204         int num_stats = rte_event_dev_xstats_names_get(evdev,
1205                                         RTE_EVENT_DEV_XSTATS_PORT,
1206                                         UINT8_MAX-1, xstats_names, ids,
1207                                         XSTATS_MAX);
1208         if (num_stats != 0) {
1209                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1210                                 0, num_stats);
1211                 goto fail;
1212         }
1213
1214         num_stats = rte_event_dev_xstats_names_get(evdev,
1215                                         RTE_EVENT_DEV_XSTATS_QUEUE,
1216                                         UINT8_MAX-1, xstats_names, ids,
1217                                         XSTATS_MAX);
1218         if (num_stats != 0) {
1219                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1220                                 0, num_stats);
1221                 goto fail;
1222         }
1223
1224         cleanup(t);
1225         return 0;
1226 fail:
1227         cleanup(t);
1228         return -1;
1229 }
1230
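/* port_reconfig_credits
 * Repeatedly reconfigure queue 0 and port 0, restart the device and pass a
 * packet through, checking that the port still has credits to enqueue after
 * each reconfiguration.
 */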
1231 static int
1232 port_reconfig_credits(struct test *t)
1233 {
1234         if (init(t, 1, 1) < 0) {
1235                 printf("%d: Error initializing device\n", __LINE__);
1236                 return -1;
1237         }
1238
1239         uint32_t i;
1240         const uint32_t NUM_ITERS = 32;
1241         for (i = 0; i < NUM_ITERS; i++) {
1242                 const struct rte_event_queue_conf conf = {
1243                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1244                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1245                         .nb_atomic_flows = 1024,
1246                         .nb_atomic_order_sequences = 1024,
1247                 };
1248                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1249                         printf("%d: error creating qid\n", __LINE__);
1250                         return -1;
1251                 }
1252                 t->qid[0] = 0;
1253
1254                 static const struct rte_event_port_conf port_conf = {
1255                                 .new_event_threshold = 128,
1256                                 .dequeue_depth = 32,
1257                                 .enqueue_depth = 64,
1258                 };
1259                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1260                         printf("%d Error setting up port\n", __LINE__);
1261                         return -1;
1262                 }
1263
1264                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1265                 if (links != 1) {
1266                         printf("%d: error mapping lb qid\n", __LINE__);
1267                         goto fail;
1268                 }
1269
1270                 if (rte_event_dev_start(evdev) < 0) {
1271                         printf("%d: Error with start call\n", __LINE__);
1272                         goto fail;
1273                 }
1274
1275                 const uint32_t NPKTS = 1;
1276                 uint32_t j;
1277                 for (j = 0; j < NPKTS; j++) {
1278                         struct rte_event ev;
1279                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1280                         if (!arp) {
1281                                 printf("%d: gen of pkt failed\n", __LINE__);
1282                                 goto fail;
1283                         }
1284                         ev.queue_id = t->qid[0];
1285                         ev.op = RTE_EVENT_OP_NEW;
1286                         ev.mbuf = arp;
1287                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1288                         if (err != 1) {
1289                                 printf("%d: Failed to enqueue\n", __LINE__);
1290                                 rte_event_dev_dump(0, stdout);
1291                                 goto fail;
1292                         }
1293                 }
1294
1295                 rte_service_run_iter_on_app_lcore(t->service_id);
1296
1297                 struct rte_event ev[NPKTS];
1298                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1299                                                         NPKTS, 0);
1300                 if (deq != 1)
1301                         printf("%d error; no packet dequeued\n", __LINE__);
1302
1303                 /* let cleanup below stop the device on last iter */
1304                 if (i != NUM_ITERS-1)
1305                         rte_event_dev_stop(evdev);
1306         }
1307
1308         cleanup(t);
1309         return 0;
1310 fail:
1311         cleanup(t);
1312         return -1;
1313 }
1314
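/* port_single_lb_reconfig
 * Link a port to a load-balanced queue, unlink it, relink it to a
 * single-link queue, and check that the device still starts cleanly.
 */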
1315 static int
1316 port_single_lb_reconfig(struct test *t)
1317 {
1318         if (init(t, 2, 2) < 0) {
1319                 printf("%d: Error initializing device\n", __LINE__);
1320                 goto fail;
1321         }
1322
1323         static const struct rte_event_queue_conf conf_lb_atomic = {
1324                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1325                 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1326                 .nb_atomic_flows = 1024,
1327                 .nb_atomic_order_sequences = 1024,
1328         };
1329         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1330                 printf("%d: error creating qid\n", __LINE__);
1331                 goto fail;
1332         }
1333
1334         static const struct rte_event_queue_conf conf_single_link = {
1335                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1336                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1337         };
1338         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1339                 printf("%d: error creating qid\n", __LINE__);
1340                 goto fail;
1341         }
1342
1343         struct rte_event_port_conf port_conf = {
1344                 .new_event_threshold = 128,
1345                 .dequeue_depth = 32,
1346                 .enqueue_depth = 64,
1347         };
1348         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1349                 printf("%d Error setting up port\n", __LINE__);
1350                 goto fail;
1351         }
1352         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1353                 printf("%d Error setting up port\n", __LINE__);
1354                 goto fail;
1355         }
1356
1357         /* link port to lb queue */
1358         uint8_t queue_id = 0;
1359         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1360                 printf("%d: error creating link for qid\n", __LINE__);
1361                 goto fail;
1362         }
1363
1364         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1365         if (ret != 1) {
1366                 printf("%d: Error unlinking lb port\n", __LINE__);
1367                 goto fail;
1368         }
1369
1370         queue_id = 1;
1371         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1372                 printf("%d: error creating link for qid\n", __LINE__);
1373                 goto fail;
1374         }
1375
1376         queue_id = 0;
1377         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1378         if (err != 1) {
1379                 printf("%d: error mapping lb qid\n", __LINE__);
1380                 goto fail;
1381         }
1382
1383         if (rte_event_dev_start(evdev) < 0) {
1384                 printf("%d: Error with start call\n", __LINE__);
1385                 goto fail;
1386         }
1387
1388         cleanup(t);
1389         return 0;
1390 fail:
1391         cleanup(t);
1392         return -1;
1393 }
1394
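/* xstats_brute_force
 * Call the xstats names/get APIs for every stat mode and every possible
 * port/queue number, to check that out-of-range ids are handled safely.
 */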
1395 static int
1396 xstats_brute_force(struct test *t)
1397 {
1398         uint32_t i;
1399         const uint32_t XSTATS_MAX = 1024;
1400         uint32_t ids[XSTATS_MAX];
1401         uint64_t values[XSTATS_MAX];
1402         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1403
1404
1405         /* Create instance with 4 ports */
1406         if (init(t, 1, 4) < 0 ||
1407                         create_ports(t, 4) < 0 ||
1408                         create_atomic_qids(t, 1) < 0) {
1409                 printf("%d: Error initializing device\n", __LINE__);
1410                 return -1;
1411         }
1412
1413         int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1414         if (err != 1) {
1415                 printf("%d: error mapping lb qid\n", __LINE__);
1416                 goto fail;
1417         }
1418
1419         if (rte_event_dev_start(evdev) < 0) {
1420                 printf("%d: Error with start call\n", __LINE__);
1421                 goto fail;
1422         }
1423
1424         for (i = 0; i < XSTATS_MAX; i++)
1425                 ids[i] = i;
1426
1427         for (i = 0; i < 3; i++) {
1428                 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1429                 uint32_t j;
1430                 for (j = 0; j < UINT8_MAX; j++) {
1431                         rte_event_dev_xstats_names_get(evdev, mode,
1432                                 j, xstats_names, ids, XSTATS_MAX);
1433
1434                         rte_event_dev_xstats_get(evdev, mode, j, ids,
1435                                                  values, XSTATS_MAX);
1436                 }
1437         }
1438
1439         cleanup(t);
1440         return 0;
1441 fail:
1442         cleanup(t);
1443         return -1;
1444 }
1445
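/* xstats_id_reset_tests
 * After pushing a burst of packets through, look up individual xstats by
 * name and verify that the returned ids and values match expectations.
 */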
1446 static int
1447 xstats_id_reset_tests(struct test *t)
1448 {
1449         const int wrk_enq = 2;
1450         int err;
1451
1452         /* Create instance with 4 ports */
1453         if (init(t, 1, 4) < 0 ||
1454                         create_ports(t, 4) < 0 ||
1455                         create_atomic_qids(t, 1) < 0) {
1456                 printf("%d: Error initializing device\n", __LINE__);
1457                 return -1;
1458         }
1459
1460         /* CQ mapping to QID */
1461         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1462         if (err != 1) {
1463                 printf("%d: error mapping lb qid\n", __LINE__);
1464                 goto fail;
1465         }
1466
1467         if (rte_event_dev_start(evdev) < 0) {
1468                 printf("%d: Error with start call\n", __LINE__);
1469                 goto fail;
1470         }
1471
1472 #define XSTATS_MAX 1024
1473         int ret;
1474         uint32_t i;
1475         uint32_t ids[XSTATS_MAX];
1476         uint64_t values[XSTATS_MAX];
1477         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1478
1479         for (i = 0; i < XSTATS_MAX; i++)
1480                 ids[i] = i;
1481
1482 #define NUM_DEV_STATS 6
1483         /* Device names / values */
1484         int num_stats = rte_event_dev_xstats_names_get(evdev,
1485                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1486                                         0, xstats_names, ids, XSTATS_MAX);
1487         if (num_stats != NUM_DEV_STATS) {
1488                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1489                                 NUM_DEV_STATS, num_stats);
1490                 goto fail;
1491         }
1492         ret = rte_event_dev_xstats_get(evdev,
1493                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1494                                         0, ids, values, num_stats);
1495         if (ret != NUM_DEV_STATS) {
1496                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1497                                 NUM_DEV_STATS, ret);
1498                 goto fail;
1499         }
1500
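             /* Inject NPKTS new events and (below) run the scheduler once, so
              * the device, port and queue counters hold known non-zero values
              * that can be checked and then reset.
              */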
1501 #define NPKTS 7
1502         for (i = 0; i < NPKTS; i++) {
1503                 struct rte_event ev;
1504                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1505                 if (!arp) {
1506                         printf("%d: gen of pkt failed\n", __LINE__);
1507                         goto fail;
1508                 }
1509                 ev.queue_id = t->qid[0];
1510                 ev.op = RTE_EVENT_OP_NEW;
1511                 ev.mbuf = arp;
1512                 arp->seqn = i;
1513
1514                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1515                 if (err != 1) {
1516                         printf("%d: Failed to enqueue\n", __LINE__);
1517                         goto fail;
1518                 }
1519         }
1520
1521         rte_service_run_iter_on_app_lcore(t->service_id);
1522
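             /* For each device-level stat: look it up by name, check the
              * returned id and value against the expected table, reset that
              * single stat, and confirm it then reads back as zero.
              */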
1523         static const char * const dev_names[] = {
1524                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1525                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1526         };
1527         uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1528         for (i = 0; (int)i < ret; i++) {
1529                 unsigned int id;
1530                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1531                                                                 dev_names[i],
1532                                                                 &id);
1533                 if (id != i) {
1534                         printf("%d: %s id incorrect, expected %d got %d\n",
1535                                         __LINE__, dev_names[i], i, id);
1536                         goto fail;
1537                 }
1538                 if (val != dev_expected[i]) {
1539                         printf("%d: %s value incorrect, expected %"PRIu64
1540                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1541                                 dev_expected[i], val);
1542                         goto fail;
1543                 }
1544                 /* reset to zero */
1545                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1546                                                 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1547                                                 &id,
1548                                                 1);
1549                 if (reset_ret) {
1550                         printf("%d: failed to reset successfully\n", __LINE__);
1551                         goto fail;
1552                 }
1553                 dev_expected[i] = 0;
1554                 /* check value again */
1555                 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1556                 if (val != dev_expected[i]) {
1557                         printf("%d: %s value incorrect, expected %"PRIu64
1558                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1559                                 dev_expected[i], val);
1560                         goto fail;
1561                 }
1562         }
1563
1564 /* 48 is the stat offset from the start of the device's whole xstats.
1565  * This WILL break every time we add a statistic to a port
1566  * or the device, but there is no other way to test it.
1567  */
1568 #define PORT_OFF 48
1569 /* num stats for the tested port. CQ size adds more stats to a port */
1570 #define NUM_PORT_STATS 21
1571 /* the port to test. */
1572 #define PORT 2
1573         num_stats = rte_event_dev_xstats_names_get(evdev,
1574                                         RTE_EVENT_DEV_XSTATS_PORT, PORT,
1575                                         xstats_names, ids, XSTATS_MAX);
1576         if (num_stats != NUM_PORT_STATS) {
1577                 printf("%d: expected %d stats, got return %d\n",
1578                         __LINE__, NUM_PORT_STATS, num_stats);
1579                 goto fail;
1580         }
1581         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1582                                         ids, values, num_stats);
1583
1584         if (ret != NUM_PORT_STATS) {
1585                 printf("%d: expected %d stats, got return %d\n",
1586                                 __LINE__, NUM_PORT_STATS, ret);
1587                 goto fail;
1588         }
1589         static const char * const port_names[] = {
1590                 "port_2_rx",
1591                 "port_2_tx",
1592                 "port_2_drop",
1593                 "port_2_inflight",
1594                 "port_2_avg_pkt_cycles",
1595                 "port_2_credits",
1596                 "port_2_rx_ring_used",
1597                 "port_2_rx_ring_free",
1598                 "port_2_cq_ring_used",
1599                 "port_2_cq_ring_free",
1600                 "port_2_dequeue_calls",
1601                 "port_2_dequeues_returning_0",
1602                 "port_2_dequeues_returning_1-4",
1603                 "port_2_dequeues_returning_5-8",
1604                 "port_2_dequeues_returning_9-12",
1605                 "port_2_dequeues_returning_13-16",
1606                 "port_2_dequeues_returning_17-20",
1607                 "port_2_dequeues_returning_21-24",
1608                 "port_2_dequeues_returning_25-28",
1609                 "port_2_dequeues_returning_29-32",
1610                 "port_2_dequeues_returning_33-36",
1611         };
1612         uint64_t port_expected[] = {
1613                 0, /* rx */
1614                 NPKTS, /* tx */
1615                 0, /* drop */
1616                 NPKTS, /* inflight */
1617                 0, /* avg pkt cycles */
1618                 0, /* credits */
1619                 0, /* rx ring used */
1620                 4096, /* rx ring free */
1621                 NPKTS,  /* cq ring used */
1622                 25, /* cq ring free */
1623                 0, /* dequeue calls */
1624                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1625                 0, 0, 0, 0, 0,
1626         };
1627         uint64_t port_expected_zero[] = {
1628                 0, /* rx */
1629                 0, /* tx */
1630                 0, /* drop */
1631                 NPKTS, /* inflight */
1632                 0, /* avg pkt cycles */
1633                 0, /* credits */
1634                 0, /* rx ring used */
1635                 4096, /* rx ring free */
1636                 NPKTS,  /* cq ring used */
1637                 25, /* cq ring free */
1638                 0, /* dequeue calls */
1639                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1640                 0, 0, 0, 0, 0,
1641         };
1642         if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1643                         RTE_DIM(port_names) != NUM_PORT_STATS) {
1644                 printf("%d: port array of wrong size\n", __LINE__);
1645                 goto fail;
1646         }
1647
1648         int failed = 0;
1649         for (i = 0; (int)i < ret; i++) {
1650                 unsigned int id;
1651                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1652                                                                 port_names[i],
1653                                                                 &id);
1654                 if (id != i + PORT_OFF) {
1655                         printf("%d: %s id incorrect, expected %d got %d\n",
1656                                         __LINE__, port_names[i], i+PORT_OFF,
1657                                         id);
1658                         failed = 1;
1659                 }
1660                 if (val != port_expected[i]) {
1661                         printf("%d: %s value incorrect, expected %"PRIu64
1662                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1663                                 port_expected[i], val);
1664                         failed = 1;
1665                 }
1666                 /* reset to zero */
1667                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1668                                                 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1669                                                 &id,
1670                                                 1);
1671                 if (reset_ret) {
1672                         printf("%d: failed to reset successfully\n", __LINE__);
1673                         failed = 1;
1674                 }
1675                 /* check value again */
1676                 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1677                 if (val != port_expected_zero[i]) {
1678                         printf("%d: %s value incorrect, expected %"PRIu64
1679                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1680                                 port_expected_zero[i], val);
1681                         failed = 1;
1682                 }
1683         }
1684         if (failed)
1685                 goto fail;
1686
1687 /* num queue stats */
1688 #define NUM_Q_STATS 17
1689 /* queue offset from the start of the device's whole xstats.
1690  * This will break every time we add a statistic to a device/port/queue
1691  */
1692 #define QUEUE_OFF 90
1693         const uint32_t queue = 0;
1694         num_stats = rte_event_dev_xstats_names_get(evdev,
1695                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1696                                         xstats_names, ids, XSTATS_MAX);
1697         if (num_stats != NUM_Q_STATS) {
1698                 printf("%d: expected %d stats, got return %d\n",
1699                         __LINE__, NUM_Q_STATS, num_stats);
1700                 goto fail;
1701         }
1702         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1703                                         queue, ids, values, num_stats);
1704         if (ret != NUM_Q_STATS) {
1705                 printf("%d: expected %d stats, got return %d\n",
                        __LINE__, NUM_Q_STATS, ret);
1706                 goto fail;
1707         }
1708         static const char * const queue_names[] = {
1709                 "qid_0_rx",
1710                 "qid_0_tx",
1711                 "qid_0_drop",
1712                 "qid_0_inflight",
1713                 "qid_0_iq_size",
1714                 "qid_0_iq_0_used",
1715                 "qid_0_iq_1_used",
1716                 "qid_0_iq_2_used",
1717                 "qid_0_iq_3_used",
1718                 "qid_0_port_0_pinned_flows",
1719                 "qid_0_port_0_packets",
1720                 "qid_0_port_1_pinned_flows",
1721                 "qid_0_port_1_packets",
1722                 "qid_0_port_2_pinned_flows",
1723                 "qid_0_port_2_packets",
1724                 "qid_0_port_3_pinned_flows",
1725                 "qid_0_port_3_packets",
1726         };
1727         uint64_t queue_expected[] = {
1728                 7, /* rx */
1729                 7, /* tx */
1730                 0, /* drop */
1731                 7, /* inflight */
1732                 512, /* iq size */
1733                 0, /* iq 0 used */
1734                 0, /* iq 1 used */
1735                 0, /* iq 2 used */
1736                 0, /* iq 3 used */
1737                 /* QID-to-Port: pinned_flows, packets */
1738                 0, 0,
1739                 0, 0,
1740                 1, 7,
1741                 0, 0,
1742         };
1743         uint64_t queue_expected_zero[] = {
1744                 0, /* rx */
1745                 0, /* tx */
1746                 0, /* drop */
1747                 7, /* inflight */
1748                 512, /* iq size */
1749                 0, /* iq 0 used */
1750                 0, /* iq 1 used */
1751                 0, /* iq 2 used */
1752                 0, /* iq 3 used */
1753                 /* QID-to-Port: pinned_flows, packets */
1754                 0, 0,
1755                 0, 0,
1756                 1, 0,
1757                 0, 0,
1758         };
1759         if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1760                         RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1761                         RTE_DIM(queue_names) != NUM_Q_STATS) {
1762                 printf("%d: queue array of wrong size\n", __LINE__);
1763                 goto fail;
1764         }
1765
1766         failed = 0;
1767         for (i = 0; (int)i < ret; i++) {
1768                 unsigned int id;
1769                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1770                                                                 queue_names[i],
1771                                                                 &id);
1772                 if (id != i + QUEUE_OFF) {
1773                         printf("%d: %s id incorrect, expected %d got %d\n",
1774                                         __LINE__, queue_names[i], i+QUEUE_OFF,
1775                                         id);
1776                         failed = 1;
1777                 }
1778                 if (val != queue_expected[i]) {
1779                         printf("%d: %s value incorrect, expected %"PRIu64
1780                                 " got %"PRIu64"\n", __LINE__,
1781                                 queue_names[i], queue_expected[i], val);
1782                         failed = 1;
1783                 }
1784                 /* reset to zero */
1785                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1786                                                 RTE_EVENT_DEV_XSTATS_QUEUE,
1787                                                 queue, &id, 1);
1788                 if (reset_ret) {
1789                         printf("%d: failed to reset successfully\n", __LINE__);
1790                         failed = 1;
1791                 }
1792                 /* check value again */
1793                 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1794                                                         0);
1795                 if (val != queue_expected_zero[i]) {
1796                         printf("%d: %s value incorrect, expected %"PRIu64
1797                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1798                                 queue_expected_zero[i], val);
1799                         failed = 1;
1800                 }
1801         }
1802
1803         if (failed)
1804                 goto fail;
1805
1806         cleanup(t);
1807         return 0;
1808 fail:
1809         cleanup(t);
1810         return -1;
1811 }
1812
1813 static int
1814 ordered_reconfigure(struct test *t)
1815 {
1816         if (init(t, 1, 1) < 0 ||
1817                         create_ports(t, 1) < 0) {
1818                 printf("%d: Error initializing device\n", __LINE__);
1819                 return -1;
1820         }
1821
1822         const struct rte_event_queue_conf conf = {
1823                         .schedule_type = RTE_SCHED_TYPE_ORDERED,
1824                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1825                         .nb_atomic_flows = 1024,
1826                         .nb_atomic_order_sequences = 1024,
1827         };
1828
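             /* Set up queue 0 twice: the second call reconfigures the existing
              * ordered queue and must also succeed, before the device is
              * started.
              */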
1829         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1830                 printf("%d: error creating qid\n", __LINE__);
1831                 goto failed;
1832         }
1833
1834         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1835                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1836                 goto failed;
1837         }
1838
1839         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1840         if (rte_event_dev_start(evdev) < 0) {
1841                 printf("%d: Error with start call\n", __LINE__);
1842                 return -1;
1843         }
1844
1845         cleanup(t);
1846         return 0;
1847 failed:
1848         cleanup(t);
1849         return -1;
1850 }
1851
1852 static int
1853 qid_priorities(struct test *t)
1854 {
1855         /* Test works by having a CQ with enough empty space for all packets,
1856          * and enqueueing 3 packets to 3 QIDs. They must return based on the
1857          * priority of the QID, not the ingress order, to pass the test
1858          */
1859         unsigned int i;
1860         /* Create instance with 1 port and 3 qids */
1861         if (init(t, 3, 1) < 0 ||
1862                         create_ports(t, 1) < 0) {
1863                 printf("%d: Error initializing device\n", __LINE__);
1864                 return -1;
1865         }
1866
1867         for (i = 0; i < 3; i++) {
1868                 /* Create QID */
1869                 const struct rte_event_queue_conf conf = {
1870                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1871                         /* increase priority (0 == highest), as we go */
1872                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1873                         .nb_atomic_flows = 1024,
1874                         .nb_atomic_order_sequences = 1024,
1875                 };
1876
1877                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1878                         printf("%d: error creating qid %d\n", __LINE__, i);
1879                         return -1;
1880                 }
1881                 t->qid[i] = i;
1882         }
1883         t->nb_qids = i;
1884         /* map all QIDs to port */
1885         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1886
1887         if (rte_event_dev_start(evdev) < 0) {
1888                 printf("%d: Error with start call\n", __LINE__);
1889                 return -1;
1890         }
1891
1892         /* enqueue 3 packets, setting seqn and QID to check priority */
1893         for (i = 0; i < 3; i++) {
1894                 struct rte_event ev;
1895                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1896                 if (!arp) {
1897                         printf("%d: gen of pkt failed\n", __LINE__);
1898                         return -1;
1899                 }
1900                 ev.queue_id = t->qid[i];
1901                 ev.op = RTE_EVENT_OP_NEW;
1902                 ev.mbuf = arp;
1903                 arp->seqn = i;
1904
1905                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1906                 if (err != 1) {
1907                         printf("%d: Failed to enqueue\n", __LINE__);
1908                         return -1;
1909                 }
1910         }
1911
1912         rte_service_run_iter_on_app_lcore(t->service_id);
1913
1914         /* dequeue packets, verify priority was upheld */
1915         struct rte_event ev[32];
1916         uint32_t deq_pkts =
1917                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1918         if (deq_pkts != 3) {
1919                 printf("%d: failed to deq packets\n", __LINE__);
1920                 rte_event_dev_dump(evdev, stdout);
1921                 return -1;
1922         }
1923         for (i = 0; i < 3; i++) {
1924                 if (ev[i].mbuf->seqn != 2-i) {
1925                         printf(
1926                                 "%d: qid priority test: seqn %d incorrectly prioritized\n",
1927                                         __LINE__, i);
                        return -1;
1928                 }
1929         }
1930
1931         cleanup(t);
1932         return 0;
1933 }
1934
1935 static int
1936 load_balancing(struct test *t)
1937 {
1938         const int rx_enq = 0;
1939         int err;
1940         uint32_t i;
1941
1942         if (init(t, 1, 4) < 0 ||
1943                         create_ports(t, 4) < 0 ||
1944                         create_atomic_qids(t, 1) < 0) {
1945                 printf("%d: Error initializing device\n", __LINE__);
1946                 return -1;
1947         }
1948
1949         for (i = 0; i < 3; i++) {
1950                 /* map ports 1 - 3 inclusive */
1951                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1952                                 NULL, 1) != 1) {
1953                         printf("%d: error mapping qid to port %d\n",
1954                                         __LINE__, i);
1955                         return -1;
1956                 }
1957         }
1958
1959         if (rte_event_dev_start(evdev) < 0) {
1960                 printf("%d: Error with start call\n", __LINE__);
1961                 return -1;
1962         }
1963
1964         /************** FORWARD ****************/
1965         /*
1966          * Create a set of flows that test the load-balancing operation of the
1967          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1968          * with a new flow, which should be sent to the 3rd mapped CQ
1969          */
1970         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
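             /* flow 0 appears 4 times, flow 1 twice and flow 2 three times, so
              * the per-port inflight counts checked below should end up as 4, 2
              * and 3.
              */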
1971
1972         for (i = 0; i < RTE_DIM(flows); i++) {
1973                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1974                 if (!arp) {
1975                         printf("%d: gen of pkt failed\n", __LINE__);
1976                         return -1;
1977                 }
1978
1979                 struct rte_event ev = {
1980                                 .op = RTE_EVENT_OP_NEW,
1981                                 .queue_id = t->qid[0],
1982                                 .flow_id = flows[i],
1983                                 .mbuf = arp,
1984                 };
1985                 /* generate pkt and enqueue */
1986                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1987                 if (err < 0) {
1988                         printf("%d: Failed to enqueue\n", __LINE__);
1989                         return -1;
1990                 }
1991         }
1992
1993         rte_service_run_iter_on_app_lcore(t->service_id);
1994
1995         struct test_event_dev_stats stats;
1996         err = test_event_dev_stats_get(evdev, &stats);
1997         if (err) {
1998                 printf("%d: failed to get stats\n", __LINE__);
1999                 return -1;
2000         }
2001
2002         if (stats.port_inflight[1] != 4) {
2003                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2004                                 __func__);
2005                 return -1;
2006         }
2007         if (stats.port_inflight[2] != 2) {
2008                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2009                                 __func__);
2010                 return -1;
2011         }
2012         if (stats.port_inflight[3] != 3) {
2013                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2014                                 __func__);
2015                 return -1;
2016         }
2017
2018         cleanup(t);
2019         return 0;
2020 }
2021
2022 static int
2023 load_balancing_history(struct test *t)
2024 {
2025         struct test_event_dev_stats stats = {0};
2026         const int rx_enq = 0;
2027         int err;
2028         uint32_t i;
2029
2030         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2031         if (init(t, 1, 4) < 0 ||
2032                         create_ports(t, 4) < 0 ||
2033                         create_atomic_qids(t, 1) < 0)
2034                 return -1;
2035
2036         /* CQ mapping to QID */
2037         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2038                 printf("%d: error mapping port 1 qid\n", __LINE__);
2039                 return -1;
2040         }
2041         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2042                 printf("%d: error mapping port 2 qid\n", __LINE__);
2043                 return -1;
2044         }
2045         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2046                 printf("%d: error mapping port 3 qid\n", __LINE__);
2047                 return -1;
2048         }
2049         if (rte_event_dev_start(evdev) < 0) {
2050                 printf("%d: Error with start call\n", __LINE__);
2051                 return -1;
2052         }
2053
2054         /*
2055          * Create a set of flows that test the load-balancing operation of the
2056          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2057          * the packet from CQ 0, send in a new set of flows. Ensure that:
2058          *  1. The new flow 3 gets into the empty CQ0
2059          *  2. Packets for the existing flow 1 get added into CQ1
2060          *  3. The next flow 0 pkt now goes to CQ2, since CQ0 and CQ1 now
2061          *     contain more outstanding pkts
2062          *
2063          *  This test makes sure that when a flow ends (i.e. all packets
2064          *  have been completed for that flow), that the flow can be moved
2065          *  to a different CQ when new packets come in for that flow.
2066          */
2067         static uint32_t flows1[] = {0, 1, 1, 2};
2068
2069         for (i = 0; i < RTE_DIM(flows1); i++) {
2070                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2071                 struct rte_event ev = {
2072                                 .flow_id = flows1[i],
2073                                 .op = RTE_EVENT_OP_NEW,
2074                                 .queue_id = t->qid[0],
2075                                 .event_type = RTE_EVENT_TYPE_CPU,
2076                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2077                                 .mbuf = arp
2078                 };
2079
2080                 if (!arp) {
2081                         printf("%d: gen of pkt failed\n", __LINE__);
2082                         return -1;
2083                 }
2084                 arp->hash.rss = flows1[i];
2085                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2086                 if (err < 0) {
2087                         printf("%d: Failed to enqueue\n", __LINE__);
2088                         return -1;
2089                 }
2090         }
2091
2092         /* call the scheduler */
2093         rte_service_run_iter_on_app_lcore(t->service_id);
2094
2095         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2096         struct rte_event ev;
2097         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2098                 printf("%d: failed to dequeue\n", __LINE__);
2099                 return -1;
2100         }
2101         if (ev.mbuf->hash.rss != flows1[0]) {
2102                 printf("%d: unexpected flow received\n", __LINE__);
2103                 return -1;
2104         }
2105
2106         /* drop the flow 0 packet from port 1 */
2107         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2108
2109         /* call the scheduler */
2110         rte_service_run_iter_on_app_lcore(t->service_id);
2111
2112         /*
2113          * Set up the next set of flows, first a new flow to fill up
2114          * CQ 0, so that the next flow 0 packet should go to CQ2
2115          */
2116         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
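             /* Three pkts of new flow 3 land in the CQ emptied above, two pkts
              * of flow 1 join its existing CQ, and the final flow 0 pkt moves
              * to the least-loaded CQ, giving the 3/4/2 inflight split checked
              * below.
              */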
2117
2118         for (i = 0; i < RTE_DIM(flows2); i++) {
2119                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2120                 struct rte_event ev = {
2121                                 .flow_id = flows2[i],
2122                                 .op = RTE_EVENT_OP_NEW,
2123                                 .queue_id = t->qid[0],
2124                                 .event_type = RTE_EVENT_TYPE_CPU,
2125                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2126                                 .mbuf = arp
2127                 };
2128
2129                 if (!arp) {
2130                         printf("%d: gen of pkt failed\n", __LINE__);
2131                         return -1;
2132                 }
2133                 arp->hash.rss = flows2[i];
2134
2135                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2136                 if (err < 0) {
2137                         printf("%d: Failed to enqueue\n", __LINE__);
2138                         return -1;
2139                 }
2140         }
2141
2142         /* schedule */
2143         rte_service_run_iter_on_app_lcore(t->service_id);
2144
2145         err = test_event_dev_stats_get(evdev, &stats);
2146         if (err) {
2147                 printf("%d: failed to get stats\n", __LINE__);
2148                 return -1;
2149         }
2150
2151         /*
2152          * Now check the resulting inflights on each port.
2153          */
2154         if (stats.port_inflight[1] != 3) {
2155                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2156                                 __func__);
2157                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2158                                 (unsigned int)stats.port_inflight[1],
2159                                 (unsigned int)stats.port_inflight[2],
2160                                 (unsigned int)stats.port_inflight[3]);
2161                 return -1;
2162         }
2163         if (stats.port_inflight[2] != 4) {
2164                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2165                                 __func__);
2166                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2167                                 (unsigned int)stats.port_inflight[1],
2168                                 (unsigned int)stats.port_inflight[2],
2169                                 (unsigned int)stats.port_inflight[3]);
2170                 return -1;
2171         }
2172         if (stats.port_inflight[3] != 2) {
2173                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2174                                 __func__);
2175                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2176                                 (unsigned int)stats.port_inflight[1],
2177                                 (unsigned int)stats.port_inflight[2],
2178                                 (unsigned int)stats.port_inflight[3]);
2179                 return -1;
2180         }
2181
2182         for (i = 1; i <= 3; i++) {
2183                 struct rte_event ev;
2184                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2185                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2186         }
2187         rte_service_run_iter_on_app_lcore(t->service_id);
2188
2189         cleanup(t);
2190         return 0;
2191 }
2192
2193 static int
2194 invalid_qid(struct test *t)
2195 {
2196         struct test_event_dev_stats stats;
2197         const int rx_enq = 0;
2198         int err;
2199         uint32_t i;
2200
2201         if (init(t, 1, 4) < 0 ||
2202                         create_ports(t, 4) < 0 ||
2203                         create_atomic_qids(t, 1) < 0) {
2204                 printf("%d: Error initializing device\n", __LINE__);
2205                 return -1;
2206         }
2207
2208         /* CQ mapping to QID */
2209         for (i = 0; i < 4; i++) {
2210                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2211                                 NULL, 1);
2212                 if (err != 1) {
2213                         printf("%d: error mapping port 1 qid\n", __LINE__);
2214                         return -1;
2215                 }
2216         }
2217
2218         if (rte_event_dev_start(evdev) < 0) {
2219                 printf("%d: Error with start call\n", __LINE__);
2220                 return -1;
2221         }
2222
2223         /*
2224          * Send in a packet with an invalid qid to the scheduler.
2225          * We should see the packet enqueued OK, but the inflights for
2226          * that packet should not be incremented, and the rx_dropped
2227          * should be incremented.
2228          */
2229         static uint32_t flows1[] = {20};
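             /* queue_id 20 is far beyond the single configured QID, so the
              * event should be dropped at the port and counted in
              * port_rx_dropped rather than in the device-level rx_dropped.
              */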
2230
2231         for (i = 0; i < RTE_DIM(flows1); i++) {
2232                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2233                 if (!arp) {
2234                         printf("%d: gen of pkt failed\n", __LINE__);
2235                         return -1;
2236                 }
2237
2238                 struct rte_event ev = {
2239                                 .op = RTE_EVENT_OP_NEW,
2240                                 .queue_id = t->qid[0] + flows1[i],
2241                                 .flow_id = i,
2242                                 .mbuf = arp,
2243                 };
2244                 /* generate pkt and enqueue */
2245                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2246                 if (err < 0) {
2247                         printf("%d: Failed to enqueue\n", __LINE__);
2248                         return -1;
2249                 }
2250         }
2251
2252         /* call the scheduler */
2253         rte_service_run_iter_on_app_lcore(t->service_id);
2254
2255         err = test_event_dev_stats_get(evdev, &stats);
2256         if (err) {
2257                 printf("%d: failed to get stats\n", __LINE__);
2258                 return -1;
2259         }
2260
2261         /*
2262          * Now check the resulting inflights on the port, and the rx_dropped.
2263          */
2264         if (stats.port_inflight[0] != 0) {
2265                 printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2266                                 __func__);
2267                 rte_event_dev_dump(evdev, stdout);
2268                 return -1;
2269         }
2270         if (stats.port_rx_dropped[0] != 1) {
2271                 printf("%d:%s: port 0 drops\n", __LINE__, __func__);
2272                 rte_event_dev_dump(evdev, stdout);
2273                 return -1;
2274         }
2275         /* each packet drop should only be counted in one place - port or dev */
2276         if (stats.rx_dropped != 0) {
2277                 printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2278                                 __func__);
2279                 rte_event_dev_dump(evdev, stdout);
2280                 return -1;
2281         }
2282
2283         cleanup(t);
2284         return 0;
2285 }
2286
2287 static int
2288 single_packet(struct test *t)
2289 {
2290         const uint32_t MAGIC_SEQN = 7321;
2291         struct rte_event ev;
2292         struct test_event_dev_stats stats;
2293         const int rx_enq = 0;
2294         const int wrk_enq = 2;
2295         int err;
2296
2297         /* Create instance with 4 ports */
2298         if (init(t, 1, 4) < 0 ||
2299                         create_ports(t, 4) < 0 ||
2300                         create_atomic_qids(t, 1) < 0) {
2301                 printf("%d: Error initializing device\n", __LINE__);
2302                 return -1;
2303         }
2304
2305         /* CQ mapping to QID */
2306         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2307         if (err != 1) {
2308                 printf("%d: error mapping lb qid\n", __LINE__);
2309                 cleanup(t);
2310                 return -1;
2311         }
2312
2313         if (rte_event_dev_start(evdev) < 0) {
2314                 printf("%d: Error with start call\n", __LINE__);
2315                 return -1;
2316         }
2317
2318         /************** Gen pkt and enqueue ****************/
2319         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2320         if (!arp) {
2321                 printf("%d: gen of pkt failed\n", __LINE__);
2322                 return -1;
2323         }
2324
2325         ev.op = RTE_EVENT_OP_NEW;
2326         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2327         ev.mbuf = arp;
2328         ev.queue_id = 0;
2329         ev.flow_id = 3;
2330         arp->seqn = MAGIC_SEQN;
2331
2332         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2333         if (err < 0) {
2334                 printf("%d: Failed to enqueue\n", __LINE__);
2335                 return -1;
2336         }
2337
2338         rte_service_run_iter_on_app_lcore(t->service_id);
2339
2340         err = test_event_dev_stats_get(evdev, &stats);
2341         if (err) {
2342                 printf("%d: failed to get stats\n", __LINE__);
2343                 return -1;
2344         }
2345
2346         if (stats.rx_pkts != 1 ||
2347                         stats.tx_pkts != 1 ||
2348                         stats.port_inflight[wrk_enq] != 1) {
2349                 printf("%d: Sched core didn't handle pkt as expected\n",
2350                                 __LINE__);
2351                 rte_event_dev_dump(evdev, stdout);
2352                 return -1;
2353         }
2354
2355         uint32_t deq_pkts;
2356
2357         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2358         if (deq_pkts < 1) {
2359                 printf("%d: Failed to deq\n", __LINE__);
2360                 return -1;
2361         }
2362
2363         err = test_event_dev_stats_get(evdev, &stats);
2364         if (err) {
2365                 printf("%d: failed to get stats\n", __LINE__);
2366                 return -1;
2367         }
2368
2370         if (ev.mbuf->seqn != MAGIC_SEQN) {
2371                 printf("%d: magic sequence number not dequeued\n", __LINE__);
2372                 return -1;
2373         }
2374
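             /* Free the mbuf and release the event so the scheduler can drop
              * the atomic context; port inflight should then return to zero.
              */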
2375         rte_pktmbuf_free(ev.mbuf);
2376         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2377         if (err < 0) {
2378                 printf("%d: Failed to enqueue\n", __LINE__);
2379                 return -1;
2380         }
2381         rte_service_run_iter_on_app_lcore(t->service_id);
2382
2383         err = test_event_dev_stats_get(evdev, &stats);
2384         if (stats.port_inflight[wrk_enq] != 0) {
2385                 printf("%d: port inflight not correct\n", __LINE__);
2386                 return -1;
2387         }
2388
2389         cleanup(t);
2390         return 0;
2391 }
2392
2393 static int
2394 inflight_counts(struct test *t)
2395 {
2396         struct rte_event ev;
2397         struct test_event_dev_stats stats;
2398         const int rx_enq = 0;
2399         const int p1 = 1;
2400         const int p2 = 2;
2401         int err;
2402         int i;
2403
2404         /* Create instance with 3 ports and 2 qids */
2405         if (init(t, 2, 3) < 0 ||
2406                         create_ports(t, 3) < 0 ||
2407                         create_atomic_qids(t, 2) < 0) {
2408                 printf("%d: Error initializing device\n", __LINE__);
2409                 return -1;
2410         }
2411
2412         /* CQ mapping to QID */
2413         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2414         if (err != 1) {
2415                 printf("%d: error mapping lb qid\n", __LINE__);
2416                 cleanup(t);
2417                 return -1;
2418         }
2419         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2420         if (err != 1) {
2421                 printf("%d: error mapping lb qid\n", __LINE__);
2422                 cleanup(t);
2423                 return -1;
2424         }
2425
2426         if (rte_event_dev_start(evdev) < 0) {
2427                 printf("%d: Error with start call\n", __LINE__);
2428                 return -1;
2429         }
2430
2431         /************** FORWARD ****************/
2432 #define QID1_NUM 5
2433         for (i = 0; i < QID1_NUM; i++) {
2434                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2435
2436                 if (!arp) {
2437                         printf("%d: gen of pkt failed\n", __LINE__);
2438                         goto err;
2439                 }
2440
2441                 ev.queue_id =  t->qid[0];
2442                 ev.op = RTE_EVENT_OP_NEW;
2443                 ev.mbuf = arp;
2444                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2445                 if (err != 1) {
2446                         printf("%d: Failed to enqueue\n", __LINE__);
2447                         goto err;
2448                 }
2449         }
2450 #define QID2_NUM 3
2451         for (i = 0; i < QID2_NUM; i++) {
2452                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2453
2454                 if (!arp) {
2455                         printf("%d: gen of pkt failed\n", __LINE__);
2456                         goto err;
2457                 }
2458                 ev.queue_id =  t->qid[1];
2459                 ev.op = RTE_EVENT_OP_NEW;
2460                 ev.mbuf = arp;
2461                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2462                 if (err != 1) {
2463                         printf("%d: Failed to enqueue\n", __LINE__);
2464                         goto err;
2465                 }
2466         }
2467
2468         /* schedule */
2469         rte_service_run_iter_on_app_lcore(t->service_id);
2470
2471         err = test_event_dev_stats_get(evdev, &stats);
2472         if (err) {
2473                 printf("%d: failed to get stats\n", __LINE__);
2474                 goto err;
2475         }
2476
2477         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2478                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
2479                 printf("%d: Sched core didn't handle pkt as expected\n",
2480                                 __LINE__);
2481                 goto err;
2482         }
2483
2484         if (stats.port_inflight[p1] != QID1_NUM) {
2485                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2486                                 __func__);
2487                 goto err;
2488         }
2489         if (stats.port_inflight[p2] != QID2_NUM) {
2490                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2491                                 __func__);
2492                 goto err;
2493         }
2494
2495         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
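             /* Dequeueing alone must not change the inflight counts; only
              * enqueueing RTE_EVENT_OP_RELEASE events (plus a scheduler run)
              * returns them to zero.
              */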
2496         /* port 1 */
2497         struct rte_event events[QID1_NUM + QID2_NUM];
2498         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2499                         RTE_DIM(events), 0);
2500
2501         if (deq_pkts != QID1_NUM) {
2502                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2503                 goto err;
2504         }
2505         err = test_event_dev_stats_get(evdev, &stats);
2506         if (stats.port_inflight[p1] != QID1_NUM) {
2507                 printf("%d: port 1 inflight changed after dequeue\n",
2508                                 __LINE__);
2509                 goto err;
2510         }
2511         for (i = 0; i < QID1_NUM; i++) {
2512                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2513                                 1);
2514                 if (err != 1) {
2515                         printf("%d: %s rte enqueue of inf release failed\n",
2516                                 __LINE__, __func__);
2517                         goto err;
2518                 }
2519         }
2520
2521         /*
2522          * The scheduler core is what decrements inflights, so it must run
2523          * again to process the release (drop) events enqueued above
2524          */
2525         rte_service_run_iter_on_app_lcore(t->service_id);
2526
2527         err = test_event_dev_stats_get(evdev, &stats);
2528         if (stats.port_inflight[p1] != 0) {
2529                 printf("%d: port 1 inflight not zero after DROP\n", __LINE__);
2530                 goto err;
2531         }
2532
2533         /* port2 */
2534         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2535                         RTE_DIM(events), 0);
2536         if (deq_pkts != QID2_NUM) {
2537                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2538                 goto err;
2539         }
2540         err = test_event_dev_stats_get(evdev, &stats);
2541         if (stats.port_inflight[p2] != QID2_NUM) {
2542                 printf("%d: port 2 inflight changed after dequeue\n",
2543                                 __LINE__);
2544                 goto err;
2545         }
2546         for (i = 0; i < QID2_NUM; i++) {
2547                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2548                                 1);
2549                 if (err != 1) {
2550                         printf("%d: %s rte enqueue of inf release failed\n",
2551                                 __LINE__, __func__);
2552                         goto err;
2553                 }
2554         }
2555
2556         /*
2557          * The scheduler core is what decrements inflights, so it must run
2558          * again to process the release (drop) events enqueued above
2559          */
2560         rte_service_run_iter_on_app_lcore(t->service_id);
2561
2562         err = test_event_dev_stats_get(evdev, &stats);
2563         if (stats.port_inflight[p2] != 0) {
2564                 printf("%d: port 2 inflight not zero after DROP\n", __LINE__);
2565                 goto err;
2566         }
2567         cleanup(t);
2568         return 0;
2569
2570 err:
2571         rte_event_dev_dump(evdev, stdout);
2572         cleanup(t);
2573         return -1;
2574 }
2575
2576 static int
2577 parallel_basic(struct test *t, int check_order)
2578 {
2579         const uint8_t rx_port = 0;
2580         const uint8_t w1_port = 1;
2581         const uint8_t w3_port = 3;
2582         const uint8_t tx_port = 4;
2583         int err;
2584         int i;
2585         uint32_t deq_pkts, j;
2586         struct rte_mbuf *mbufs[3];
2588         const uint32_t MAGIC_SEQN = 1234;
2589
2590         /* Create instance with 5 ports and 2 qids */
2591         if (init(t, 2, tx_port + 1) < 0 ||
2592                         create_ports(t, tx_port + 1) < 0 ||
2593                         (check_order ?  create_ordered_qids(t, 1) :
2594                                 create_unordered_qids(t, 1)) < 0 ||
2595                         create_directed_qids(t, 1, &tx_port)) {
2596                 printf("%d: Error initializing device\n", __LINE__);
2597                 return -1;
2598         }
2599
2600         /*
2601          * CQ mapping to QID
2602          * We need three ports, all mapped to the same ordered qid0. Then we'll
2603          * We need three ports, all mapped to the same qid0 (ordered or
2604          * unordered, depending on check_order). Then we'll take a packet out
2605          * to each port, re-enqueue them in reverse order, and make sure any
2606          * required reordering has taken place when we dequeue from the tx_port.
2607          * Simplified test setup diagram:
2608          *
2609          * rx_port        w1_port
2610          *        \     /         \
2611          *         qid0 - w2_port - qid1
2612          *              \         /     \
2613          *                w3_port        tx_port
2614          */
2615         /* CQ mapping to QID for LB ports (directed mapped on create) */
2616         for (i = w1_port; i <= w3_port; i++) {
2617                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2618                                 1);
2619                 if (err != 1) {
2620                         printf("%d: error mapping lb qid\n", __LINE__);
2621                         cleanup(t);
2622                         return -1;
2623                 }
2624         }
2625
2626         if (rte_event_dev_start(evdev) < 0) {
2627                 printf("%d: Error with start call\n", __LINE__);
2628                 return -1;
2629         }
2630
2631         /* Enqueue 3 packets to the rx port */
2632         for (i = 0; i < 3; i++) {
2633                 struct rte_event ev;
2634                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2635                 if (!mbufs[i]) {
2636                         printf("%d: gen of pkt failed\n", __LINE__);
2637                         return -1;
2638                 }
2639
2640                 ev.queue_id = t->qid[0];
2641                 ev.op = RTE_EVENT_OP_NEW;
2642                 ev.mbuf = mbufs[i];
2643                 mbufs[i]->seqn = MAGIC_SEQN + i;
2644
2645                 /* generate pkt and enqueue */
2646                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2647                 if (err != 1) {
2648                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2649                                         __LINE__, i, err);
2650                         return -1;
2651                 }
2652         }
2653
2654         rte_service_run_iter_on_app_lcore(t->service_id);
2655
2656         /* use extra slot to make logic in loops easier */
2657         struct rte_event deq_ev[w3_port + 1];
2658
2659         /* Dequeue the 3 packets, one from each worker port */
2660         for (i = w1_port; i <= w3_port; i++) {
2661                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2662                                 &deq_ev[i], 1, 0);
2663                 if (deq_pkts != 1) {
2664                         printf("%d: Failed to deq\n", __LINE__);
2665                         rte_event_dev_dump(evdev, stdout);
2666                         return -1;
2667                 }
2668         }
2669
2670         /* Enqueue each packet in reverse order, flushing after each one */
2671         for (i = w3_port; i >= w1_port; i--) {
2672
2673                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2674                 deq_ev[i].queue_id = t->qid[1];
2675                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2676                 if (err != 1) {
2677                         printf("%d: Failed to enqueue\n", __LINE__);
2678                         return -1;
2679                 }
2680         }
2681         rte_service_run_iter_on_app_lcore(t->service_id);
2682
2683         /* dequeue from the tx ports, we should get 3 packets */
2684         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2685                         3, 0);
2686
2687         /* Check to see if we've got all 3 packets */
2688         if (deq_pkts != 3) {
2689                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2690                         __LINE__, deq_pkts, tx_port);
2691                 rte_event_dev_dump(evdev, stdout);
2692                 return -1;
2693         }
2694
2695         /* Check to see if the sequence numbers are in expected order */
2696         if (check_order) {
2697                 for (j = 0 ; j < deq_pkts ; j++) {
2698                         if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2699                                 printf(
2700                                         "%d: Incorrect sequence number(%d) from port %d\n",
2701                                         __LINE__, deq_ev[j].mbuf->seqn, tx_port);
2702                                 return -1;
2703                         }
2704                 }
2705         }
2706
2707         /* Destroy the instance */
2708         cleanup(t);
2709         return 0;
2710 }
2711
2712 static int
2713 ordered_basic(struct test *t)
2714 {
2715         return parallel_basic(t, 1);
2716 }
2717
2718 static int
2719 unordered_basic(struct test *t)
2720 {
2721         return parallel_basic(t, 0);
2722 }
2723
2724 static int
2725 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2726 {
2727         const struct rte_event new_ev = {
2728                         .op = RTE_EVENT_OP_NEW
2729                         /* all other fields zero */
2730         };
2731         struct rte_event ev = new_ev;
2732         unsigned int rx_port = 0; /* port we get the first flow on */
2733         char rx_port_used_stat[64];
2734         char rx_port_free_stat[64];
2735         char other_port_used_stat[64];
2736
2737         if (init(t, 1, 2) < 0 ||
2738                         create_ports(t, 2) < 0 ||
2739                         create_atomic_qids(t, 1) < 0) {
2740                 printf("%d: Error initializing device\n", __LINE__);
2741                 return -1;
2742         }
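             /* Link the single atomic QID to both ports. Which port the first
              * flow is scheduled to is not fixed, so it is detected via xstats
              * below and used as the "rx" port for the rest of the test.
              */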
2743         int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2744         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2745                         nb_links != 1) {
2746                 printf("%d: Error linking queue to ports\n", __LINE__);
2747                 goto err;
2748         }
2749         if (rte_event_dev_start(evdev) < 0) {
2750                 printf("%d: Error with start call\n", __LINE__);
2751                 goto err;
2752         }
2753
2754         /* send one packet and see where it goes, port 0 or 1 */
2755         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2756                 printf("%d: Error doing first enqueue\n", __LINE__);
2757                 goto err;
2758         }
2759         rte_service_run_iter_on_app_lcore(t->service_id);
2760
2761         if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2762                         != 1)
2763                 rx_port = 1;
2764
2765         snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2766                         "port_%u_cq_ring_used", rx_port);
2767         snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2768                         "port_%u_cq_ring_free", rx_port);
2769         snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2770                         "port_%u_cq_ring_used", rx_port ^ 1);
2771         if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2772                         != 1) {
2773                 printf("%d: Error, first event not scheduled\n", __LINE__);
2774                 goto err;
2775         }
2776
2777         /* now fill up the rx port's queue with one flow to cause HOLB */
2778         do {
2779                 ev = new_ev;
2780                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2781                         printf("%d: Error with enqueue\n", __LINE__);
2782                         goto err;
2783                 }
2784                 rte_service_run_iter_on_app_lcore(t->service_id);
2785         } while (rte_event_dev_xstats_by_name_get(evdev,
2786                                 rx_port_free_stat, NULL) != 0);
2787
2788         /* one more packet, which needs to stay in IQ - i.e. HOLB */
2789         ev = new_ev;
2790         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2791                 printf("%d: Error with enqueue\n", __LINE__);
2792                 goto err;
2793         }
2794         rte_service_run_iter_on_app_lcore(t->service_id);
2795
2796         /* check that the other port still has an empty CQ */
2797         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2798                         != 0) {
2799                 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2800                 goto err;
2801         }
2802         /* check IQ now has one packet */
2803         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2804                         != 1) {
2805                 printf("%d: Error, QID does not have exactly 1 packet\n",
2806                         __LINE__);
2807                 goto err;
2808         }
2809
2810         /* send another flow, which should pass the other IQ entry */
2811         ev = new_ev;
2812         ev.flow_id = 1;
2813         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2814                 printf("%d: Error with enqueue\n", __LINE__);
2815                 goto err;
2816         }
2817         rte_service_run_iter_on_app_lcore(t->service_id);
2818
2819         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2820                         != 1) {
2821                 printf("%d: Error, second flow did not pass out first\n",
2822                         __LINE__);
2823                 goto err;
2824         }
2825
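        /* the blocked flow-0 event should still be waiting in the IQ; only
         * the flow-1 event may have been scheduled past it
         */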
2826         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2827                         != 1) {
2828                 printf("%d: Error, QID does not have exactly 1 packet\n",
2829                         __LINE__);
2830                 goto err;
2831         }
2832         cleanup(t);
2833         return 0;
2834 err:
2835         rte_event_dev_dump(evdev, stdout);
2836         cleanup(t);
2837         return -1;
2838 }
2839
2840 static int
2841 worker_loopback_worker_fn(void *arg)
2842 {
2843         struct test *t = arg;
2844         uint8_t port = t->port[1];
2845         int count = 0;
2846         int enqd;
2847
2848         /*
2849          * Takes packets from the input port and loops them back through
2850          * the eventdev. Each packet is cycled through QIDs 0-7 sixteen
2851          * times, so each packet is scheduled 8*16 = 128 times in total.
2852          */
2853         printf("%d: \tWorker function started\n", __LINE__);
2854         while (count < NUM_PACKETS) {
2855 #define BURST_SIZE 32
2856                 struct rte_event ev[BURST_SIZE];
2857                 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2858                                 BURST_SIZE, 0);
2859                 if (nb_rx == 0) {
2860                         rte_pause();
2861                         continue;
2862                 }
2863
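                /* for each event: advance it to the next QID and forward it;
                 * once it has passed the last QID, reset to QID 0 and bump the
                 * per-mbuf lap counter; after 16 laps, free the mbuf and
                 * release the event
                 */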
2864                 for (i = 0; i < nb_rx; i++) {
2865                         ev[i].queue_id++;
2866                         if (ev[i].queue_id != 8) {
2867                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2868                                 enqd = rte_event_enqueue_burst(evdev, port,
2869                                                 &ev[i], 1);
2870                                 if (enqd != 1) {
2871                                         printf("%d: Can't enqueue FWD!!\n",
2872                                                         __LINE__);
2873                                         return -1;
2874                                 }
2875                                 continue;
2876                         }
2877
2878                         ev[i].queue_id = 0;
2879                         ev[i].mbuf->udata64++;
2880                         if (ev[i].mbuf->udata64 != 16) {
2881                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2882                                 enqd = rte_event_enqueue_burst(evdev, port,
2883                                                 &ev[i], 1);
2884                                 if (enqd != 1) {
2885                                         printf("%d: Can't enqueue FWD!!\n",
2886                                                         __LINE__);
2887                                         return -1;
2888                                 }
2889                                 continue;
2890                         }
2891                         /* we have hit 16 iterations through system - drop */
2892                         rte_pktmbuf_free(ev[i].mbuf);
2893                         count++;
2894                         ev[i].op = RTE_EVENT_OP_RELEASE;
2895                         enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2896                         if (enqd != 1) {
2897                                 printf("%d: drop enqueue failed\n", __LINE__);
2898                                 return -1;
2899                         }
2900                 }
2901         }
2902
2903         return 0;
2904 }
2905
2906 static int
2907 worker_loopback_producer_fn(void *arg)
2908 {
2909         struct test *t = arg;
2910         uint8_t port = t->port[0];
2911         uint64_t count = 0;
2912
2913         printf("%d: \tProducer function started\n", __LINE__);
2914         while (count < NUM_PACKETS) {
2915                 struct rte_mbuf *m = NULL;
2916                 do {
2917                         m = rte_pktmbuf_alloc(t->mbuf_pool);
2918                 } while (m == NULL);
2919
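                /* udata64 counts how many times the worker has cycled this
                 * mbuf through all eight QIDs; the worker drops it at 16
                 */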
2920                 m->udata64 = 0;
2921
2922                 struct rte_event ev = {
2923                                 .op = RTE_EVENT_OP_NEW,
2924                                 .queue_id = t->qid[0],
2925                                 .flow_id = (uintptr_t)m & 0xFFFF,
2926                                 .mbuf = m,
2927                 };
2928
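                /* OP_NEW enqueues can be refused while the device sits at its
                 * new_event_threshold, so spin until the event is accepted
                 */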
2929                 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2930                         while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
2931                                         1)
2932                                 rte_pause();
2933                 }
2934
2935                 count++;
2936         }
2937
2938         return 0;
2939 }
2940
2941 static int
2942 worker_loopback(struct test *t)
2943 {
2944         /* use a single producer core, and a worker core to see what happens
2945          * if the worker loops packets back multiple times
2946          */
2947         struct test_event_dev_stats stats;
2948         uint64_t print_cycles = 0, cycles = 0;
2949         uint64_t tx_pkts = 0;
2950         int err;
2951         int w_lcore, p_lcore;
2952
2953         if (init(t, 8, 2) < 0 ||
2954                         create_atomic_qids(t, 8) < 0) {
2955                 printf("%d: Error initializing device\n", __LINE__);
2956                 return -1;
2957         }
2958
2959         /* RX with low max events */
2960         static struct rte_event_port_conf conf = {
2961                         .dequeue_depth = 32,
2962                         .enqueue_depth = 64,
2963         };
2964         /* beware: new_event_threshold cannot go in the static initializer
2965          * above: a static is initialized only once, and this value must be
2966          * set afresh for every test run */
2967         conf.new_event_threshold = 512;
2968
2969         if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2970                 printf("Error setting up RX port\n");
2971                 return -1;
2972         }
2973         t->port[0] = 0;
2974         /* TX with higher max events */
2975         conf.new_event_threshold = 4096;
2976         if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2977                 printf("Error setting up TX port\n");
2978                 return -1;
2979         }
2980         t->port[1] = 1;
2981
2982         /* CQ mapping to QID */
2983         err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2984         if (err != 8) { /* should have mapped all queues */
2985                 printf("%d: error mapping port 1 to all qids\n", __LINE__);
2986                 return -1;
2987         }
2988
2989         if (rte_event_dev_start(evdev) < 0) {
2990                 printf("%d: Error with start call\n", __LINE__);
2991                 return -1;
2992         }
2993
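        /* pick two lcores besides the master: the first for the producer and
         * the next for the worker; the master lcore drives the scheduler
         * service in the loop below
         */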
2994         p_lcore = rte_get_next_lcore(
2995                         /* start core */ -1,
2996                         /* skip master */ 1,
2997                         /* wrap */ 0);
2998         w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
2999
3000         rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3001         rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3002
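        /* run the scheduler service here while producer and worker execute;
         * print Rx/Tx totals roughly once per second and bail out if nothing
         * is transmitted for about 3 seconds (deadlock)
         */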
3003         print_cycles = cycles = rte_get_timer_cycles();
3004         while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3005                         rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3006
3007                 rte_service_run_iter_on_app_lcore(t->service_id);
3008
3009                 uint64_t new_cycles = rte_get_timer_cycles();
3010
3011                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3012                         test_event_dev_stats_get(evdev, &stats);
3013                         printf(
3014                                 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3015                                 __LINE__, stats.rx_pkts, stats.tx_pkts);
3016
3017                         print_cycles = new_cycles;
3018                 }
3019                 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3020                         test_event_dev_stats_get(evdev, &stats);
3021                         if (stats.tx_pkts == tx_pkts) {
3022                                 rte_event_dev_dump(evdev, stdout);
3023                                 printf("Dumping xstats:\n");
3024                                 xstats_print();
3025                                 printf(
3026                                         "%d: No schedules for 3 seconds, deadlock\n",
3027                                         __LINE__);
3028                                 return -1;
3029                         }
3030                         tx_pkts = stats.tx_pkts;
3031                         cycles = new_cycles;
3032                 }
3033         }
3034         rte_service_run_iter_on_app_lcore(t->service_id);
3035         /* ensure all completions are flushed */
3036
3037         rte_eal_mp_wait_lcore();
3038
3039         cleanup(t);
3040         return 0;
3041 }
3042
3043 static struct rte_mempool *eventdev_func_mempool;
3044
3045 static int
3046 test_sw_eventdev(void)
3047 {
3048         struct test *t = malloc(sizeof(struct test));
3049         int ret;
3050
        if (t == NULL) {
                printf("%d: Error allocating test structure\n", __LINE__);
                return -1;
        }

3051         /* manually initialize the op: older gcc versions complain about
3052          * static initialization of struct members that are bitfields.
3053          */
3054         release_ev.op = RTE_EVENT_OP_RELEASE;
3055
3056         const char *eventdev_name = "event_sw0";
3057         evdev = rte_event_dev_get_dev_id(eventdev_name);
3058         if (evdev < 0) {
3059                 printf("%d: Eventdev %s not found - creating.\n",
3060                                 __LINE__, eventdev_name);
3061                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3062                         printf("Error creating eventdev\n");
3063                         return -1;
3064                 }
3065                 evdev = rte_event_dev_get_dev_id(eventdev_name);
3066                 if (evdev < 0) {
3067                         printf("Error finding newly created eventdev\n");
3068                         return -1;
3069                 }
3070         }
3071
3072         if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3073                 printf("Failed to get service ID for software event dev\n");
3074                 return -1;
3075         }
3076
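        /* mark the scheduler service runnable and disable the service-core
         * mapping check, so the tests can drive it directly with
         * rte_service_run_iter_on_app_lcore()
         */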
3077         rte_service_runstate_set(t->service_id, 1);
3078         rte_service_set_runstate_mapped_check(t->service_id, 0);
3079
3080         /* Only create mbuf pool once, reuse for each test run */
3081         if (!eventdev_func_mempool) {
3082                 eventdev_func_mempool = rte_pktmbuf_pool_create(
3083                                 "EVENTDEV_SW_SA_MBUF_POOL",
3084                                 (1<<12), /* 4k buffers */
3085                                 32 /*MBUF_CACHE_SIZE*/,
3086                                 0,
3087                                 512, /* use very small mbufs */
3088                                 rte_socket_id());
3089                 if (!eventdev_func_mempool) {
3090                         printf("ERROR creating mempool\n");
3091                         return -1;
3092                 }
3093         }
3094         t->mbuf_pool = eventdev_func_mempool;
3095         printf("*** Running Single Directed Packet test...\n");
3096         ret = test_single_directed_packet(t);
3097         if (ret != 0) {
3098                 printf("ERROR - Single Directed Packet test FAILED.\n");
3099                 return ret;
3100         }
3101         printf("*** Running Directed Forward Credit test...\n");
3102         ret = test_directed_forward_credits(t);
3103         if (ret != 0) {
3104                 printf("ERROR - Directed Forward Credit test FAILED.\n");
3105                 return ret;
3106         }
3107         printf("*** Running Single Load Balanced Packet test...\n");
3108         ret = single_packet(t);
3109         if (ret != 0) {
3110                 printf("ERROR - Single Packet test FAILED.\n");
3111                 return ret;
3112         }
3113         printf("*** Running Unordered Basic test...\n");
3114         ret = unordered_basic(t);
3115         if (ret != 0) {
3116                 printf("ERROR - Unordered Basic test FAILED.\n");
3117                 return ret;
3118         }
3119         printf("*** Running Ordered Basic test...\n");
3120         ret = ordered_basic(t);
3121         if (ret != 0) {
3122                 printf("ERROR - Ordered Basic test FAILED.\n");
3123                 return ret;
3124         }
3125         printf("*** Running Burst Packets test...\n");
3126         ret = burst_packets(t);
3127         if (ret != 0) {
3128                 printf("ERROR - Burst Packets test FAILED.\n");
3129                 return ret;
3130         }
3131         printf("*** Running Load Balancing test...\n");
3132         ret = load_balancing(t);
3133         if (ret != 0) {
3134                 printf("ERROR - Load Balancing test FAILED.\n");
3135                 return ret;
3136         }
3137         printf("*** Running Prioritized Directed test...\n");
3138         ret = test_priority_directed(t);
3139         if (ret != 0) {
3140                 printf("ERROR - Prioritized Directed test FAILED.\n");
3141                 return ret;
3142         }
3143         printf("*** Running Prioritized Atomic test...\n");
3144         ret = test_priority_atomic(t);
3145         if (ret != 0) {
3146                 printf("ERROR - Prioritized Atomic test FAILED.\n");
3147                 return ret;
3148         }
3149
3150         printf("*** Running Prioritized Ordered test...\n");
3151         ret = test_priority_ordered(t);
3152         if (ret != 0) {
3153                 printf("ERROR - Prioritized Ordered test FAILED.\n");
3154                 return ret;
3155         }
3156         printf("*** Running Prioritized Unordered test...\n");
3157         ret = test_priority_unordered(t);
3158         if (ret != 0) {
3159                 printf("ERROR - Prioritized Unordered test FAILED.\n");
3160                 return ret;
3161         }
3162         printf("*** Running Invalid QID test...\n");
3163         ret = invalid_qid(t);
3164         if (ret != 0) {
3165                 printf("ERROR - Invalid QID test FAILED.\n");
3166                 return ret;
3167         }
3168         printf("*** Running Load Balancing History test...\n");
3169         ret = load_balancing_history(t);
3170         if (ret != 0) {
3171                 printf("ERROR - Load Balancing History test FAILED.\n");
3172                 return ret;
3173         }
3174         printf("*** Running Inflight Count test...\n");
3175         ret = inflight_counts(t);
3176         if (ret != 0) {
3177                 printf("ERROR - Inflight Count test FAILED.\n");
3178                 return ret;
3179         }
3180         printf("*** Running Abuse Inflights test...\n");
3181         ret = abuse_inflights(t);
3182         if (ret != 0) {
3183                 printf("ERROR - Abuse Inflights test FAILED.\n");
3184                 return ret;
3185         }
3186         printf("*** Running XStats test...\n");
3187         ret = xstats_tests(t);
3188         if (ret != 0) {
3189                 printf("ERROR - XStats test FAILED.\n");
3190                 return ret;
3191         }
3192         printf("*** Running XStats ID Reset test...\n");
3193         ret = xstats_id_reset_tests(t);
3194         if (ret != 0) {
3195                 printf("ERROR - XStats ID Reset test FAILED.\n");
3196                 return ret;
3197         }
3198         printf("*** Running XStats Brute Force test...\n");
3199         ret = xstats_brute_force(t);
3200         if (ret != 0) {
3201                 printf("ERROR - XStats Brute Force test FAILED.\n");
3202                 return ret;
3203         }
3204         printf("*** Running XStats ID Abuse test...\n");
3205         ret = xstats_id_abuse_tests(t);
3206         if (ret != 0) {
3207                 printf("ERROR - XStats ID Abuse test FAILED.\n");
3208                 return ret;
3209         }
3210         printf("*** Running QID Priority test...\n");
3211         ret = qid_priorities(t);
3212         if (ret != 0) {
3213                 printf("ERROR - QID Priority test FAILED.\n");
3214                 return ret;
3215         }
3216         printf("*** Running Ordered Reconfigure test...\n");
3217         ret = ordered_reconfigure(t);
3218         if (ret != 0) {
3219                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3220                 return ret;
3221         }
3222         printf("*** Running Port LB Single Reconfig test...\n");
3223         ret = port_single_lb_reconfig(t);
3224         if (ret != 0) {
3225                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3226                 return ret;
3227         }
3228         printf("*** Running Port Reconfig Credits test...\n");
3229         ret = port_reconfig_credits(t);
3230         if (ret != 0) {
3231                 printf("ERROR - Port Reconfig Credits test FAILED.\n");
3232                 return ret;
3233         }
3234         printf("*** Running Head-of-line-blocking test...\n");
3235         ret = holb(t);
3236         if (ret != 0) {
3237                 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3238                 return ret;
3239         }
3240         if (rte_lcore_count() >= 3) {
3241                 printf("*** Running Worker loopback test...\n");
3242                 ret = worker_loopback(t);
3243                 if (ret != 0) {
3244                         printf("ERROR - Worker loopback test FAILED.\n");
3245                         return ret;
3246                 }
3247         } else {
3248                 printf("### Not enough cores for worker loopback test.\n");
3249                 printf("### Need at least 3 cores for test.\n");
3250         }
3251         /*
3252          * Free test instance, leaving mempool initialized, and a pointer to it
3253          * in static eventdev_func_mempool, as it is re-used on re-runs
3254          */
3255         free(t);
3256
3257         return 0;
3258 }
3259
3260 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);