1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <string.h>
36 #include <stdint.h>
37 #include <errno.h>
38 #include <unistd.h>
39 #include <sys/queue.h>
40
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
44 #include <rte_eal.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
50
51 #include <rte_eventdev.h>
52 #include "test.h"
53
54 #define MAX_PORTS 16
55 #define MAX_QIDS 16
56 #define NUM_PACKETS (1<<18)
57
58 static int evdev;
59
60 struct test {
61         struct rte_mempool *mbuf_pool;
62         uint8_t port[MAX_PORTS];
63         uint8_t qid[MAX_QIDS];
64         int nb_qids;
65 };
66
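/* Op-only event used by tests that enqueue a release with no mbuf
 * attached; assumed to be initialized by the test harness before use.
 */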
67 static struct rte_event release_ev;
68
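/* rte_gen_arp
 * Allocate an mbuf from the given mempool and fill it with a canned
 * broadcast ARP request frame ("who-has 10.0.0.1 tell 10.0.0.2").
 * Returns NULL on allocation failure; the port id argument is unused.
 */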
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
71 {
72         /*
73          * len = 14 + 46
74          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
75          */
76         static const uint8_t arp_request[] = {
77                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84                 0x00, 0x00, 0x00, 0x00
85         };
86         struct rte_mbuf *m;
87         int pkt_len = sizeof(arp_request) - 1;
88
89         m = rte_pktmbuf_alloc(mp);
90         if (!m)
91                 return 0;
92
93         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94                 arp_request, pkt_len);
95         rte_pktmbuf_pkt_len(m) = pkt_len;
96         rte_pktmbuf_data_len(m) = pkt_len;
97
98         RTE_SET_USED(portid);
99
100         return m;
101 }
102
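/* xstats_print
 * Debug helper: fetch and print the device-level xstats, then the xstats
 * for port 1 and queue 1, one "index : name : value" line per stat.
 */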
103 static void
104 xstats_print(void)
105 {
106         const uint32_t XSTATS_MAX = 1024;
107         uint32_t i;
108         uint32_t ids[XSTATS_MAX];
109         uint64_t values[XSTATS_MAX];
110         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
111
112         for (i = 0; i < XSTATS_MAX; i++)
113                 ids[i] = i;
114
115         /* Device names / values */
116         int ret = rte_event_dev_xstats_names_get(evdev,
117                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
118                                         xstats_names, ids, XSTATS_MAX);
119         if (ret < 0) {
120                 printf("%d: xstats names get() returned error\n",
121                         __LINE__);
122                 return;
123         }
124         ret = rte_event_dev_xstats_get(evdev,
125                                         RTE_EVENT_DEV_XSTATS_DEVICE,
126                                         0, ids, values, ret);
127         if (ret > (signed int)XSTATS_MAX)
128                 printf("%s %d: more xstats available than space\n",
129                                 __func__, __LINE__);
130         for (i = 0; (signed int)i < ret; i++) {
131                 printf("%d : %s : %"PRIu64"\n",
132                                 i, xstats_names[i].name, values[i]);
133         }
134
135         /* Port names / values */
136         ret = rte_event_dev_xstats_names_get(evdev,
137                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
138                                         xstats_names, ids, XSTATS_MAX);
139         ret = rte_event_dev_xstats_get(evdev,
140                                         RTE_EVENT_DEV_XSTATS_PORT, 1,
141                                         ids, values, ret);
142         if (ret > (signed int)XSTATS_MAX)
143                 printf("%s %d: more xstats available than space\n",
144                                 __func__, __LINE__);
145         for (i = 0; (signed int)i < ret; i++) {
146                 printf("%d : %s : %"PRIu64"\n",
147                                 i, xstats_names[i].name, values[i]);
148         }
149
150         /* Queue names / values */
151         ret = rte_event_dev_xstats_names_get(evdev,
152                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
153                                         xstats_names, ids, XSTATS_MAX);
154         ret = rte_event_dev_xstats_get(evdev,
155                                         RTE_EVENT_DEV_XSTATS_QUEUE,
156                                         1, ids, values, ret);
157         if (ret > (signed int)XSTATS_MAX)
158                 printf("%s %d: more xstats available than space\n",
159                                 __func__, __LINE__);
160         for (i = 0; (signed int)i < ret; i++) {
161                 printf("%d : %s : %"PRIu64"\n",
162                                 i, xstats_names[i].name, values[i]);
163         }
164 }
165
166 /* initialization and config */
167 static inline int
168 init(struct test *t, int nb_queues, int nb_ports)
169 {
170         struct rte_event_dev_config config = {
171                         .nb_event_queues = nb_queues,
172                         .nb_event_ports = nb_ports,
173                         .nb_event_queue_flows = 1024,
174                         .nb_events_limit = 4096,
175                         .nb_event_port_dequeue_depth = 128,
176                         .nb_event_port_enqueue_depth = 128,
177         };
178         int ret;
179
180         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
181
182         memset(t, 0, sizeof(*t));
183         t->mbuf_pool = temp;
184
185         ret = rte_event_dev_configure(evdev, &config);
186         if (ret < 0)
187                 printf("%d: Error configuring device\n", __LINE__);
188         return ret;
189 }
190
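/* create_ports
 * Set up num_ports event ports with a fixed port configuration and record
 * their ids in t->port[]. Fails if num_ports exceeds MAX_PORTS or any
 * port setup call fails.
 */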
191 static inline int
192 create_ports(struct test *t, int num_ports)
193 {
194         int i;
195         static const struct rte_event_port_conf conf = {
196                         .new_event_threshold = 1024,
197                         .dequeue_depth = 32,
198                         .enqueue_depth = 64,
199         };
200         if (num_ports > MAX_PORTS)
201                 return -1;
202
203         for (i = 0; i < num_ports; i++) {
204                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
205                         printf("Error setting up port %d\n", i);
206                         return -1;
207                 }
208                 t->port[i] = i;
209         }
210
211         return 0;
212 }
213
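/* create_lb_qids
 * Create num_qids load-balanced queues of the type selected by flags,
 * starting at the next unused queue id, and record them in t->qid[].
 */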
214 static inline int
215 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
216 {
217         int i;
218
219         /* Q creation */
220         const struct rte_event_queue_conf conf = {
221                         .event_queue_cfg = flags,
222                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
223                         .nb_atomic_flows = 1024,
224                         .nb_atomic_order_sequences = 1024,
225         };
226
227         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
228                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
229                         printf("%d: error creating qid %d\n", __LINE__, i);
230                         return -1;
231                 }
232                 t->qid[i] = i;
233         }
234         t->nb_qids += num_qids;
235         if (t->nb_qids > MAX_QIDS)
236                 return -1;
237
238         return 0;
239 }
240
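/* Thin wrappers around create_lb_qids() for the atomic, ordered and
 * parallel (unordered) queue types.
 */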
241 static inline int
242 create_atomic_qids(struct test *t, int num_qids)
243 {
244         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
245 }
246
247 static inline int
248 create_ordered_qids(struct test *t, int num_qids)
249 {
250         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
251 }
252
253
254 static inline int
255 create_unordered_qids(struct test *t, int num_qids)
256 {
257         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
258 }
259
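/* create_directed_qids
 * Create num_qids single-link (directed) queues and link each one to the
 * corresponding entry in ports[].
 */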
260 static inline int
261 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
262 {
263         int i;
264
265         /* Q creation */
266         static const struct rte_event_queue_conf conf = {
267                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
268                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
269                         .nb_atomic_flows = 1024,
270                         .nb_atomic_order_sequences = 1024,
271         };
272
273         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
274                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
275                         printf("%d: error creating qid %d\n", __LINE__, i);
276                         return -1;
277                 }
278                 t->qid[i] = i;
279
280                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
281                                 &t->qid[i], NULL, 1) != 1) {
282                         printf("%d: error creating link for qid %d\n",
283                                         __LINE__, i);
284                         return -1;
285                 }
286         }
287         t->nb_qids += num_qids;
288         if (t->nb_qids > MAX_QIDS)
289                 return -1;
290
291         return 0;
292 }
293
294 /* destruction */
295 static inline int
296 cleanup(struct test *t __rte_unused)
297 {
298         rte_event_dev_stop(evdev);
299         rte_event_dev_close(evdev);
300         return 0;
301 }
302
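/* Snapshot of device, per-port and per-queue counters, filled in from the
 * driver's xstats by test_event_dev_stats_get() below.
 */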
303 struct test_event_dev_stats {
304         uint64_t rx_pkts;       /**< Total packets received */
305         uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
306         uint64_t tx_pkts;       /**< Total packets transmitted */
307
308         /** Packets received on this port */
309         uint64_t port_rx_pkts[MAX_PORTS];
310         /** Packets dropped on this port */
311         uint64_t port_rx_dropped[MAX_PORTS];
312         /** Packets inflight on this port */
313         uint64_t port_inflight[MAX_PORTS];
314         /** Packets transmitted on this port */
315         uint64_t port_tx_pkts[MAX_PORTS];
316         /** Packets received on this qid */
317         uint64_t qid_rx_pkts[MAX_QIDS];
318         /** Packets dropped on this qid */
319         uint64_t qid_rx_dropped[MAX_QIDS];
320         /** Packets transmitted on this qid */
321         uint64_t qid_tx_pkts[MAX_QIDS];
322 };
323
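/* test_event_dev_stats_get
 * Populate a test_event_dev_stats snapshot by looking up the software
 * eventdev's xstats by name ("dev_rx", "port_N_rx", "qid_N_tx", ...)
 * with rte_event_dev_xstats_by_name_get(). Always returns 0.
 */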
324 static inline int
325 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
326 {
327         static uint32_t i;
328         static uint32_t total_ids[3]; /* rx, tx and drop */
329         static uint32_t port_rx_pkts_ids[MAX_PORTS];
330         static uint32_t port_rx_dropped_ids[MAX_PORTS];
331         static uint32_t port_inflight_ids[MAX_PORTS];
332         static uint32_t port_tx_pkts_ids[MAX_PORTS];
333         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
334         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
335         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
336
337
338         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
339                         "dev_rx", &total_ids[0]);
340         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
341                         "dev_drop", &total_ids[1]);
342         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
343                         "dev_tx", &total_ids[2]);
344         for (i = 0; i < MAX_PORTS; i++) {
345                 char name[32];
346                 snprintf(name, sizeof(name), "port_%u_rx", i);
347                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
348                                 dev_id, name, &port_rx_pkts_ids[i]);
349                 snprintf(name, sizeof(name), "port_%u_drop", i);
350                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
351                                 dev_id, name, &port_rx_dropped_ids[i]);
352                 snprintf(name, sizeof(name), "port_%u_inflight", i);
353                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
354                                 dev_id, name, &port_inflight_ids[i]);
355                 snprintf(name, sizeof(name), "port_%u_tx", i);
356                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
357                                 dev_id, name, &port_tx_pkts_ids[i]);
358         }
359         for (i = 0; i < MAX_QIDS; i++) {
360                 char name[32];
361                 snprintf(name, sizeof(name), "qid_%u_rx", i);
362                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
363                                 dev_id, name, &qid_rx_pkts_ids[i]);
364                 snprintf(name, sizeof(name), "qid_%u_drop", i);
365                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
366                                 dev_id, name, &qid_rx_dropped_ids[i]);
367                 snprintf(name, sizeof(name), "qid_%u_tx", i);
368                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
369                                 dev_id, name, &qid_tx_pkts_ids[i]);
370         }
371
372         return 0;
373 }
374
375 /* run_prio_packet_test
376  * This performs a basic packet priority check on the test instance passed in.
377  * It is factored out of the main priority tests as the same tests must be
378  * performed to ensure prioritization of each type of QID.
379  *
380  * Requirements:
381  *  - An initialized test structure, including mempool
382  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
383  *  - t->qid[0] is the QID to be tested
384  *  - if LB QID, the CQ must be mapped to the QID.
385  */
386 static int
387 run_prio_packet_test(struct test *t)
388 {
389         int err;
390         const uint32_t MAGIC_SEQN[] = {4711, 1234};
391         const uint32_t PRIORITY[] = {
392                 RTE_EVENT_DEV_PRIORITY_NORMAL,
393                 RTE_EVENT_DEV_PRIORITY_HIGHEST
394         };
395         unsigned int i;
396         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
397                 /* generate pkt and enqueue */
398                 struct rte_event ev;
399                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
400                 if (!arp) {
401                         printf("%d: gen of pkt failed\n", __LINE__);
402                         return -1;
403                 }
404                 arp->seqn = MAGIC_SEQN[i];
405
406                 ev = (struct rte_event){
407                         .priority = PRIORITY[i],
408                         .op = RTE_EVENT_OP_NEW,
409                         .queue_id = t->qid[0],
410                         .mbuf = arp
411                 };
412                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
413                 if (err < 0) {
414                         printf("%d: error failed to enqueue\n", __LINE__);
415                         return -1;
416                 }
417         }
418
419         rte_event_schedule(evdev);
420
421         struct test_event_dev_stats stats;
422         err = test_event_dev_stats_get(evdev, &stats);
423         if (err) {
424                 printf("%d: error failed to get stats\n", __LINE__);
425                 return -1;
426         }
427
428         if (stats.port_rx_pkts[t->port[0]] != 2) {
429                 printf("%d: error stats incorrect for directed port\n",
430                                 __LINE__);
431                 rte_event_dev_dump(evdev, stdout);
432                 return -1;
433         }
434
435         struct rte_event ev, ev2;
436         uint32_t deq_pkts;
437         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
438         if (deq_pkts != 1) {
439                 printf("%d: error failed to deq\n", __LINE__);
440                 rte_event_dev_dump(evdev, stdout);
441                 return -1;
442         }
443         if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
444                 printf("%d: first packet out not highest priority\n",
445                                 __LINE__);
446                 rte_event_dev_dump(evdev, stdout);
447                 return -1;
448         }
449         rte_pktmbuf_free(ev.mbuf);
450
451         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
452         if (deq_pkts != 1) {
453                 printf("%d: error failed to deq\n", __LINE__);
454                 rte_event_dev_dump(evdev, stdout);
455                 return -1;
456         }
457         if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
458                 printf("%d: second packet out not lower priority\n",
459                                 __LINE__);
460                 rte_event_dev_dump(evdev, stdout);
461                 return -1;
462         }
463         rte_pktmbuf_free(ev2.mbuf);
464
465         cleanup(t);
466         return 0;
467 }
468
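/* test_single_directed_packet
 * Enqueue one packet to a directed queue from the rx port, run the
 * scheduler, and verify via the stats and the dequeued sequence number
 * that it arrives unmodified on the linked worker port.
 */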
469 static int
470 test_single_directed_packet(struct test *t)
471 {
472         const int rx_enq = 0;
473         const int wrk_enq = 2;
474         int err;
475
476         /* Create instance with 3 directed QIDs going to 3 ports */
477         if (init(t, 3, 3) < 0 ||
478                         create_ports(t, 3) < 0 ||
479                         create_directed_qids(t, 3, t->port) < 0)
480                 return -1;
481
482         if (rte_event_dev_start(evdev) < 0) {
483                 printf("%d: Error with start call\n", __LINE__);
484                 return -1;
485         }
486
487         /************** FORWARD ****************/
488         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
489         struct rte_event ev = {
490                         .op = RTE_EVENT_OP_NEW,
491                         .queue_id = wrk_enq,
492                         .mbuf = arp,
493         };
494
495         if (!arp) {
496                 printf("%d: gen of pkt failed\n", __LINE__);
497                 return -1;
498         }
499
500         const uint32_t MAGIC_SEQN = 4711;
501         arp->seqn = MAGIC_SEQN;
502
503         /* generate pkt and enqueue */
504         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
505         if (err < 0) {
506                 printf("%d: error failed to enqueue\n", __LINE__);
507                 return -1;
508         }
509
510         /* Run schedule() as directed packets may need to be re-ordered */
511         rte_event_schedule(evdev);
512
513         struct test_event_dev_stats stats;
514         err = test_event_dev_stats_get(evdev, &stats);
515         if (err) {
516                 printf("%d: error failed to get stats\n", __LINE__);
517                 return -1;
518         }
519
520         if (stats.port_rx_pkts[rx_enq] != 1) {
521                 printf("%d: error stats incorrect for directed port\n",
522                                 __LINE__);
523                 return -1;
524         }
525
526         uint32_t deq_pkts;
527         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
528         if (deq_pkts != 1) {
529                 printf("%d: error failed to deq\n", __LINE__);
530                 return -1;
531         }
532
533         err = test_event_dev_stats_get(evdev, &stats);
534         if (stats.port_rx_pkts[wrk_enq] != 0 &&
535                         stats.port_rx_pkts[wrk_enq] != 1) {
536                 printf("%d: error directed stats post-dequeue\n", __LINE__);
537                 return -1;
538         }
539
540         if (ev.mbuf->seqn != MAGIC_SEQN) {
541                 printf("%d: error magic sequence number not dequeued\n",
542                                 __LINE__);
543                 return -1;
544         }
545
546         rte_pktmbuf_free(ev.mbuf);
547         cleanup(t);
548         return 0;
549 }
550
551
552 static int
553 test_priority_directed(struct test *t)
554 {
555         if (init(t, 1, 1) < 0 ||
556                         create_ports(t, 1) < 0 ||
557                         create_directed_qids(t, 1, t->port) < 0) {
558                 printf("%d: Error initializing device\n", __LINE__);
559                 return -1;
560         }
561
562         if (rte_event_dev_start(evdev) < 0) {
563                 printf("%d: Error with start call\n", __LINE__);
564                 return -1;
565         }
566
567         return run_prio_packet_test(t);
568 }
569
570 static int
571 test_priority_atomic(struct test *t)
572 {
573         if (init(t, 1, 1) < 0 ||
574                         create_ports(t, 1) < 0 ||
575                         create_atomic_qids(t, 1) < 0) {
576                 printf("%d: Error initializing device\n", __LINE__);
577                 return -1;
578         }
579
580         /* map the QID */
581         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
582                 printf("%d: error mapping qid to port\n", __LINE__);
583                 return -1;
584         }
585         if (rte_event_dev_start(evdev) < 0) {
586                 printf("%d: Error with start call\n", __LINE__);
587                 return -1;
588         }
589
590         return run_prio_packet_test(t);
591 }
592
593 static int
594 test_priority_ordered(struct test *t)
595 {
596         if (init(t, 1, 1) < 0 ||
597                         create_ports(t, 1) < 0 ||
598                         create_ordered_qids(t, 1) < 0) {
599                 printf("%d: Error initializing device\n", __LINE__);
600                 return -1;
601         }
602
603         /* map the QID */
604         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
605                 printf("%d: error mapping qid to port\n", __LINE__);
606                 return -1;
607         }
608         if (rte_event_dev_start(evdev) < 0) {
609                 printf("%d: Error with start call\n", __LINE__);
610                 return -1;
611         }
612
613         return run_prio_packet_test(t);
614 }
615
616 static int
617 test_priority_unordered(struct test *t)
618 {
619         if (init(t, 1, 1) < 0 ||
620                         create_ports(t, 1) < 0 ||
621                         create_unordered_qids(t, 1) < 0) {
622                 printf("%d: Error initializing device\n", __LINE__);
623                 return -1;
624         }
625
626         /* map the QID */
627         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
628                 printf("%d: error mapping qid to port\n", __LINE__);
629                 return -1;
630         }
631         if (rte_event_dev_start(evdev) < 0) {
632                 printf("%d: Error with start call\n", __LINE__);
633                 return -1;
634         }
635
636         return run_prio_packet_test(t);
637 }
638
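/* burst_packets
 * Enqueue NUM_PKTS packets alternating between two atomic queues and
 * verify that each of the two linked ports dequeues exactly half of them.
 */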
639 static int
640 burst_packets(struct test *t)
641 {
642         /************** CONFIG ****************/
643         uint32_t i;
644         int err;
645         int ret;
646
647         /* Create instance with 2 ports and 2 queues */
648         if (init(t, 2, 2) < 0 ||
649                         create_ports(t, 2) < 0 ||
650                         create_atomic_qids(t, 2) < 0) {
651                 printf("%d: Error initializing device\n", __LINE__);
652                 return -1;
653         }
654
655         /* CQ mapping to QID */
656         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
657         if (ret != 1) {
658                 printf("%d: error mapping lb qid0\n", __LINE__);
659                 return -1;
660         }
661         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
662         if (ret != 1) {
663                 printf("%d: error mapping lb qid1\n", __LINE__);
664                 return -1;
665         }
666
667         if (rte_event_dev_start(evdev) < 0) {
668                 printf("%d: Error with start call\n", __LINE__);
669                 return -1;
670         }
671
672         /************** FORWARD ****************/
673         const uint32_t rx_port = 0;
674         const uint32_t NUM_PKTS = 2;
675
676         for (i = 0; i < NUM_PKTS; i++) {
677                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
678                 if (!arp) {
679                         printf("%d: error generating pkt\n", __LINE__);
680                         return -1;
681                 }
682
683                 struct rte_event ev = {
684                                 .op = RTE_EVENT_OP_NEW,
685                                 .queue_id = i % 2,
686                                 .flow_id = i % 3,
687                                 .mbuf = arp,
688                 };
689                 /* generate pkt and enqueue */
690                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
691                 if (err < 0) {
692                         printf("%d: Failed to enqueue\n", __LINE__);
693                         return -1;
694                 }
695         }
696         rte_event_schedule(evdev);
697
698         /* Check stats for all NUM_PKTS arrived to sched core */
699         struct test_event_dev_stats stats;
700
701         err = test_event_dev_stats_get(evdev, &stats);
702         if (err) {
703                 printf("%d: failed to get stats\n", __LINE__);
704                 return -1;
705         }
706         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
707                 printf("%d: Sched core didn't receive all %d pkts\n",
708                                 __LINE__, NUM_PKTS);
709                 rte_event_dev_dump(evdev, stdout);
710                 return -1;
711         }
712
713         uint32_t deq_pkts;
714         int p;
715
716         deq_pkts = 0;
717         /******** DEQ QID 1 *******/
718         do {
719                 struct rte_event ev;
720                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
721                 deq_pkts += p;
722                 rte_pktmbuf_free(ev.mbuf);
723         } while (p);
724
725         if (deq_pkts != NUM_PKTS/2) {
726                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
727                                 __LINE__);
728                 return -1;
729         }
730
731         /******** DEQ QID 2 *******/
732         deq_pkts = 0;
733         do {
734                 struct rte_event ev;
735                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
736                 deq_pkts += p;
737                 rte_pktmbuf_free(ev.mbuf);
738         } while (p);
739         if (deq_pkts != NUM_PKTS/2) {
740                 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
741                                 __LINE__);
742                 return -1;
743         }
744
745         cleanup(t);
746         return 0;
747 }
748
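/* abuse_inflights
 * Enqueue the op-only release_ev with no preceding NEW events and check
 * that the scheduler records no rx/tx packets and leaves nothing in
 * flight on the worker port.
 */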
749 static int
750 abuse_inflights(struct test *t)
751 {
752         const int rx_enq = 0;
753         const int wrk_enq = 2;
754         int err;
755
756         /* Create instance with 4 ports */
757         if (init(t, 1, 4) < 0 ||
758                         create_ports(t, 4) < 0 ||
759                         create_atomic_qids(t, 1) < 0) {
760                 printf("%d: Error initializing device\n", __LINE__);
761                 return -1;
762         }
763
764         /* CQ mapping to QID */
765         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
766         if (err != 1) {
767                 printf("%d: error mapping lb qid\n", __LINE__);
768                 cleanup(t);
769                 return -1;
770         }
771
772         if (rte_event_dev_start(evdev) < 0) {
773                 printf("%d: Error with start call\n", __LINE__);
774                 return -1;
775         }
776
777         /* Enqueue op only */
778         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
779         if (err < 0) {
780                 printf("%d: Failed to enqueue\n", __LINE__);
781                 return -1;
782         }
783
784         /* schedule */
785         rte_event_schedule(evdev);
786
787         struct test_event_dev_stats stats;
788
789         err = test_event_dev_stats_get(evdev, &stats);
790         if (err) {
791                 printf("%d: failed to get stats\n", __LINE__);
792                 return -1;
793         }
794
795         if (stats.rx_pkts != 0 ||
796                         stats.tx_pkts != 0 ||
797                         stats.port_inflight[wrk_enq] != 0) {
798                 printf("%d: Sched core didn't handle pkt as expected\n",
799                                 __LINE__);
800                 return -1;
801         }
802
803         cleanup(t);
804         return 0;
805 }
806
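/* xstats_tests
 * Exercise the xstats API end to end: check the expected stat counts for
 * the device, port and queue groups, enqueue three packets, verify the
 * reported values, then reset each group and verify which counters return
 * to zero.
 */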
807 static int
808 xstats_tests(struct test *t)
809 {
810         const int wrk_enq = 2;
811         int err;
812
813         /* Create instance with 4 ports */
814         if (init(t, 1, 4) < 0 ||
815                         create_ports(t, 4) < 0 ||
816                         create_atomic_qids(t, 1) < 0) {
817                 printf("%d: Error initializing device\n", __LINE__);
818                 return -1;
819         }
820
821         /* CQ mapping to QID */
822         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
823         if (err != 1) {
824                 printf("%d: error mapping lb qid\n", __LINE__);
825                 cleanup(t);
826                 return -1;
827         }
828
829         if (rte_event_dev_start(evdev) < 0) {
830                 printf("%d: Error with start call\n", __LINE__);
831                 return -1;
832         }
833
834         const uint32_t XSTATS_MAX = 1024;
835
836         uint32_t i;
837         uint32_t ids[XSTATS_MAX];
838         uint64_t values[XSTATS_MAX];
839         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
840
841         for (i = 0; i < XSTATS_MAX; i++)
842                 ids[i] = i;
843
844         /* Device names / values */
845         int ret = rte_event_dev_xstats_names_get(evdev,
846                                         RTE_EVENT_DEV_XSTATS_DEVICE,
847                                         0, xstats_names, ids, XSTATS_MAX);
848         if (ret != 6) {
849                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
850                 return -1;
851         }
852         ret = rte_event_dev_xstats_get(evdev,
853                                         RTE_EVENT_DEV_XSTATS_DEVICE,
854                                         0, ids, values, ret);
855         if (ret != 6) {
856                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
857                 return -1;
858         }
859
860         /* Port names / values */
861         ret = rte_event_dev_xstats_names_get(evdev,
862                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
863                                         xstats_names, ids, XSTATS_MAX);
864         if (ret != 21) {
865                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
866                 return -1;
867         }
868         ret = rte_event_dev_xstats_get(evdev,
869                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
870                                         ids, values, ret);
871         if (ret != 21) {
872                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
873                 return -1;
874         }
875
876         /* Queue names / values */
877         ret = rte_event_dev_xstats_names_get(evdev,
878                                         RTE_EVENT_DEV_XSTATS_QUEUE,
879                                         0, xstats_names, ids, XSTATS_MAX);
880         if (ret != 17) {
881                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
882                 return -1;
883         }
884
885         /* NEGATIVE TEST: passing an invalid queue id should return -EINVAL */
886         ret = rte_event_dev_xstats_get(evdev,
887                                         RTE_EVENT_DEV_XSTATS_QUEUE,
888                                         1, ids, values, ret);
889         if (ret != -EINVAL) {
890                 printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
891                 return -1;
892         }
893
894         ret = rte_event_dev_xstats_get(evdev,
895                                         RTE_EVENT_DEV_XSTATS_QUEUE,
896                                         0, ids, values, ret);
897         if (ret != 17) {
898                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
899                 return -1;
900         }
901
902         /* enqueue packets to check values */
903         for (i = 0; i < 3; i++) {
904                 struct rte_event ev;
905                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
906                 if (!arp) {
907                         printf("%d: gen of pkt failed\n", __LINE__);
908                         return -1;
909                 }
910                 ev.queue_id = t->qid[i];
911                 ev.op = RTE_EVENT_OP_NEW;
912                 ev.mbuf = arp;
913                 ev.flow_id = 7;
914                 arp->seqn = i;
915
916                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
917                 if (err != 1) {
918                         printf("%d: Failed to enqueue\n", __LINE__);
919                         return -1;
920                 }
921         }
922
923         rte_event_schedule(evdev);
924
925         /* Device names / values */
926         int num_stats = rte_event_dev_xstats_names_get(evdev,
927                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
928                                         xstats_names, ids, XSTATS_MAX);
929         if (num_stats < 0)
930                 goto fail;
931         ret = rte_event_dev_xstats_get(evdev,
932                                         RTE_EVENT_DEV_XSTATS_DEVICE,
933                                         0, ids, values, num_stats);
934         static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
935         for (i = 0; (signed int)i < ret; i++) {
936                 if (expected[i] != values[i]) {
937                         printf(
938                                 "%d Error xstat %d (id %d) %s : %"PRIu64
939                                 ", expect %"PRIu64"\n",
940                                 __LINE__, i, ids[i], xstats_names[i].name,
941                                 values[i], expected[i]);
942                         goto fail;
943                 }
944         }
945
946         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
947                                         0, NULL, 0);
948
949         /* ensure reset statistics are zero-ed */
950         static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
951         ret = rte_event_dev_xstats_get(evdev,
952                                         RTE_EVENT_DEV_XSTATS_DEVICE,
953                                         0, ids, values, num_stats);
954         for (i = 0; (signed int)i < ret; i++) {
955                 if (expected_zero[i] != values[i]) {
956                         printf(
957                                 "%d Error, xstat %d (id %d) %s : %"PRIu64
958                                 ", expect %"PRIu64"\n",
959                                 __LINE__, i, ids[i], xstats_names[i].name,
960                                 values[i], expected_zero[i]);
961                         goto fail;
962                 }
963         }
964
965         /* port reset checks */
966         num_stats = rte_event_dev_xstats_names_get(evdev,
967                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
968                                         xstats_names, ids, XSTATS_MAX);
969         if (num_stats < 0)
970                 goto fail;
971         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
972                                         0, ids, values, num_stats);
973
974         static const uint64_t port_expected[] = {
975                 3 /* rx */,
976                 0 /* tx */,
977                 0 /* drop */,
978                 0 /* inflights */,
979                 0 /* avg pkt cycles */,
980                 29 /* credits */,
981                 0 /* rx ring used */,
982                 4096 /* rx ring free */,
983                 0 /* cq ring used */,
984                 32 /* cq ring free */,
985                 0 /* dequeue calls */,
986                 /* 10 dequeue burst buckets */
987                 0, 0, 0, 0, 0,
988                 0, 0, 0, 0, 0,
989         };
990         if (ret != RTE_DIM(port_expected)) {
991                 printf(
992                         "%s %d: wrong number of port stats (%d), expected %zu\n",
993                         __func__, __LINE__, ret, RTE_DIM(port_expected));
994         }
995
996         for (i = 0; (signed int)i < ret; i++) {
997                 if (port_expected[i] != values[i]) {
998                         printf(
999                                 "%s : %d: Error stat %s is %"PRIu64
1000                                 ", expected %"PRIu64"\n",
1001                                 __func__, __LINE__, xstats_names[i].name,
1002                                 values[i], port_expected[i]);
1003                         goto fail;
1004                 }
1005         }
1006
1007         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1008                                         0, NULL, 0);
1009
1010         /* ensure reset statistics are zero-ed */
1011         static const uint64_t port_expected_zero[] = {
1012                 0 /* rx */,
1013                 0 /* tx */,
1014                 0 /* drop */,
1015                 0 /* inflights */,
1016                 0 /* avg pkt cycles */,
1017                 29 /* credits */,
1018                 0 /* rx ring used */,
1019                 4096 /* rx ring free */,
1020                 0 /* cq ring used */,
1021                 32 /* cq ring free */,
1022                 0 /* dequeue calls */,
1023                 /* 10 dequeue burst buckets */
1024                 0, 0, 0, 0, 0,
1025                 0, 0, 0, 0, 0,
1026         };
1027         ret = rte_event_dev_xstats_get(evdev,
1028                                         RTE_EVENT_DEV_XSTATS_PORT,
1029                                         0, ids, values, num_stats);
1030         for (i = 0; (signed int)i < ret; i++) {
1031                 if (port_expected_zero[i] != values[i]) {
1032                         printf(
1033                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1034                                 ", expect %"PRIu64"\n",
1035                                 __LINE__, i, ids[i], xstats_names[i].name,
1036                                 values[i], port_expected_zero[i]);
1037                         goto fail;
1038                 }
1039         }
1040
1041         /* QUEUE STATS TESTS */
1042         num_stats = rte_event_dev_xstats_names_get(evdev,
1043                                                 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1044                                                 xstats_names, ids, XSTATS_MAX);
1045         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1046                                         0, ids, values, num_stats);
1047         if (ret < 0) {
1048                 printf("xstats get returned %d\n", ret);
1049                 goto fail;
1050         }
1051         if ((unsigned int)ret > XSTATS_MAX)
1052                 printf("%s %d: more xstats available than space\n",
1053                                 __func__, __LINE__);
1054
1055         static const uint64_t queue_expected[] = {
1056                 3 /* rx */,
1057                 3 /* tx */,
1058                 0 /* drop */,
1059                 3 /* inflights */,
1060                 512 /* iq size */,
1061                 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1062                 /* QID-to-Port: pinned_flows, packets */
1063                 0, 0,
1064                 0, 0,
1065                 1, 3,
1066                 0, 0,
1067         };
1068         for (i = 0; (signed int)i < ret; i++) {
1069                 if (queue_expected[i] != values[i]) {
1070                         printf(
1071                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1072                                 ", expect %"PRIu64"\n",
1073                                 __LINE__, i, ids[i], xstats_names[i].name,
1074                                 values[i], queue_expected[i]);
1075                         goto fail;
1076                 }
1077         }
1078
1079         /* Reset the queue stats here */
1080         ret = rte_event_dev_xstats_reset(evdev,
1081                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1082                                         NULL,
1083                                         0);
1084
1085         /* Verify that the resettable stats are reset, and others are not */
1086         static const uint64_t queue_expected_zero[] = {
1087                 0 /* rx */,
1088                 0 /* tx */,
1089                 0 /* drop */,
1090                 3 /* inflight */,
1091                 512 /* iq size */,
1092                 0, 0, 0, 0, /* 4 iq used */
1093                 /* QID-to-Port: pinned_flows, packets */
1094                 0, 0,
1095                 0, 0,
1096                 1, 0,
1097                 0, 0,
1098         };
1099
1100         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1101                                         ids, values, num_stats);
1102         int fails = 0;
1103         for (i = 0; (signed int)i < ret; i++) {
1104                 if (queue_expected_zero[i] != values[i]) {
1105                         printf(
1106                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1107                                 ", expect %"PRIu64"\n",
1108                                 __LINE__, i, ids[i], xstats_names[i].name,
1109                                 values[i], queue_expected_zero[i]);
1110                         fails++;
1111                 }
1112         }
1113         if (fails) {
1114                 printf("%d : %d of values were not as expected above\n",
1115                                 __LINE__, fails);
1116                 goto fail;
1117         }
1118
1119         cleanup(t);
1120         return 0;
1121
1122 fail:
1123         rte_event_dev_dump(0, stdout);
1124         cleanup(t);
1125         return -1;
1126 }
1127
1128
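/* xstats_id_abuse_tests
 * Negative test: request port and queue xstats names with an out-of-range
 * id (UINT8_MAX - 1) and expect zero stats to be returned.
 */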
1129 static int
1130 xstats_id_abuse_tests(struct test *t)
1131 {
1132         int err;
1133         const uint32_t XSTATS_MAX = 1024;
1134         const uint32_t link_port = 2;
1135
1136         uint32_t ids[XSTATS_MAX];
1137         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1138
1139         /* Create instance with 4 ports */
1140         if (init(t, 1, 4) < 0 ||
1141                         create_ports(t, 4) < 0 ||
1142                         create_atomic_qids(t, 1) < 0) {
1143                 printf("%d: Error initializing device\n", __LINE__);
1144                 goto fail;
1145         }
1146
1147         err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1148         if (err != 1) {
1149                 printf("%d: error mapping lb qid\n", __LINE__);
1150                 goto fail;
1151         }
1152
1153         if (rte_event_dev_start(evdev) < 0) {
1154                 printf("%d: Error with start call\n", __LINE__);
1155                 goto fail;
1156         }
1157
1158         /* no test for device, as it ignores the port/q number */
1159         int num_stats = rte_event_dev_xstats_names_get(evdev,
1160                                         RTE_EVENT_DEV_XSTATS_PORT,
1161                                         UINT8_MAX-1, xstats_names, ids,
1162                                         XSTATS_MAX);
1163         if (num_stats != 0) {
1164                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1165                                 0, num_stats);
1166                 goto fail;
1167         }
1168
1169         num_stats = rte_event_dev_xstats_names_get(evdev,
1170                                         RTE_EVENT_DEV_XSTATS_QUEUE,
1171                                         UINT8_MAX-1, xstats_names, ids,
1172                                         XSTATS_MAX);
1173         if (num_stats != 0) {
1174                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1175                                 0, num_stats);
1176                 goto fail;
1177         }
1178
1179         cleanup(t);
1180         return 0;
1181 fail:
1182         cleanup(t);
1183         return -1;
1184 }
1185
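/* port_reconfig_credits
 * Repeatedly reconfigure queue 0 and port 0, enqueue and dequeue a single
 * packet, then stop the device, to check that port credits survive
 * reconfiguration across NUM_ITERS iterations.
 */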
1186 static int
1187 port_reconfig_credits(struct test *t)
1188 {
1189         if (init(t, 1, 1) < 0) {
1190                 printf("%d: Error initializing device\n", __LINE__);
1191                 return -1;
1192         }
1193
1194         uint32_t i;
1195         const uint32_t NUM_ITERS = 32;
1196         for (i = 0; i < NUM_ITERS; i++) {
1197                 const struct rte_event_queue_conf conf = {
1198                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1199                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1200                         .nb_atomic_flows = 1024,
1201                         .nb_atomic_order_sequences = 1024,
1202                 };
1203                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1204                         printf("%d: error creating qid\n", __LINE__);
1205                         return -1;
1206                 }
1207                 t->qid[0] = 0;
1208
1209                 static const struct rte_event_port_conf port_conf = {
1210                                 .new_event_threshold = 128,
1211                                 .dequeue_depth = 32,
1212                                 .enqueue_depth = 64,
1213                 };
1214                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1215                         printf("%d Error setting up port\n", __LINE__);
1216                         return -1;
1217                 }
1218
1219                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1220                 if (links != 1) {
1221                         printf("%d: error mapping lb qid\n", __LINE__);
1222                         goto fail;
1223                 }
1224
1225                 if (rte_event_dev_start(evdev) < 0) {
1226                         printf("%d: Error with start call\n", __LINE__);
1227                         goto fail;
1228                 }
1229
1230                 const uint32_t NPKTS = 1;
1231                 uint32_t j;
1232                 for (j = 0; j < NPKTS; j++) {
1233                         struct rte_event ev;
1234                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1235                         if (!arp) {
1236                                 printf("%d: gen of pkt failed\n", __LINE__);
1237                                 goto fail;
1238                         }
1239                         ev.queue_id = t->qid[0];
1240                         ev.op = RTE_EVENT_OP_NEW;
1241                         ev.mbuf = arp;
1242                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1243                         if (err != 1) {
1244                                 printf("%d: Failed to enqueue\n", __LINE__);
1245                                 rte_event_dev_dump(0, stdout);
1246                                 goto fail;
1247                         }
1248                 }
1249
1250                 rte_event_schedule(evdev);
1251
1252                 struct rte_event ev[NPKTS];
1253                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1254                                                         NPKTS, 0);
1255                 if (deq != 1)
1256                         printf("%d error; no packet dequeued\n", __LINE__);
1257
1258                 /* let cleanup below stop the device on last iter */
1259                 if (i != NUM_ITERS-1)
1260                         rte_event_dev_stop(evdev);
1261         }
1262
1263         cleanup(t);
1264         return 0;
1265 fail:
1266         cleanup(t);
1267         return -1;
1268 }
1269
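/* port_single_lb_reconfig
 * Configure one load-balanced and one single-link queue, then re-link
 * port 0 from the LB queue to the single-link queue (and port 1 to the LB
 * queue) before starting, to check link/unlink handling during
 * reconfiguration.
 */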
1270 static int
1271 port_single_lb_reconfig(struct test *t)
1272 {
1273         if (init(t, 2, 2) < 0) {
1274                 printf("%d: Error initializing device\n", __LINE__);
1275                 goto fail;
1276         }
1277
1278         static const struct rte_event_queue_conf conf_lb_atomic = {
1279                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1280                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1281                 .nb_atomic_flows = 1024,
1282                 .nb_atomic_order_sequences = 1024,
1283         };
1284         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1285                 printf("%d: error creating qid\n", __LINE__);
1286                 goto fail;
1287         }
1288
1289         static const struct rte_event_queue_conf conf_single_link = {
1290                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1291                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1292                 .nb_atomic_flows = 1024,
1293                 .nb_atomic_order_sequences = 1024,
1294         };
1295         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1296                 printf("%d: error creating qid\n", __LINE__);
1297                 goto fail;
1298         }
1299
1300         struct rte_event_port_conf port_conf = {
1301                 .new_event_threshold = 128,
1302                 .dequeue_depth = 32,
1303                 .enqueue_depth = 64,
1304         };
1305         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1306                 printf("%d Error setting up port\n", __LINE__);
1307                 goto fail;
1308         }
1309         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1310                 printf("%d Error setting up port\n", __LINE__);
1311                 goto fail;
1312         }
1313
1314         /* link port to lb queue */
1315         uint8_t queue_id = 0;
1316         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1317                 printf("%d: error creating link for qid\n", __LINE__);
1318                 goto fail;
1319         }
1320
1321         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1322         if (ret != 1) {
1323                 printf("%d: Error unlinking lb port\n", __LINE__);
1324                 goto fail;
1325         }
1326
1327         queue_id = 1;
1328         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1329                 printf("%d: error creating link for qid\n", __LINE__);
1330                 goto fail;
1331         }
1332
1333         queue_id = 0;
1334         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1335         if (err != 1) {
1336                 printf("%d: error mapping lb qid\n", __LINE__);
1337                 goto fail;
1338         }
1339
1340         if (rte_event_dev_start(evdev) < 0) {
1341                 printf("%d: Error with start call\n", __LINE__);
1342                 goto fail;
1343         }
1344
1345         cleanup(t);
1346         return 0;
1347 fail:
1348         cleanup(t);
1349         return -1;
1350 }
1351
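/* xstats_brute_force
 * Brute-force the xstats API: request names and values for every stats
 * mode and every id from 0 to UINT8_MAX - 1, checking nothing crashes.
 */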
1352 static int
1353 xstats_brute_force(struct test *t)
1354 {
1355         uint32_t i;
1356         const uint32_t XSTATS_MAX = 1024;
1357         uint32_t ids[XSTATS_MAX];
1358         uint64_t values[XSTATS_MAX];
1359         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1360
1361
1362         /* Create instance with 4 ports */
1363         if (init(t, 1, 4) < 0 ||
1364                         create_ports(t, 4) < 0 ||
1365                         create_atomic_qids(t, 1) < 0) {
1366                 printf("%d: Error initializing device\n", __LINE__);
1367                 return -1;
1368         }
1369
1370         int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1371         if (err != 1) {
1372                 printf("%d: error mapping lb qid\n", __LINE__);
1373                 goto fail;
1374         }
1375
1376         if (rte_event_dev_start(evdev) < 0) {
1377                 printf("%d: Error with start call\n", __LINE__);
1378                 goto fail;
1379         }
1380
1381         for (i = 0; i < XSTATS_MAX; i++)
1382                 ids[i] = i;
1383
1384         for (i = 0; i < 3; i++) {
1385                 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1386                 uint32_t j;
1387                 for (j = 0; j < UINT8_MAX; j++) {
1388                         rte_event_dev_xstats_names_get(evdev, mode,
1389                                 j, xstats_names, ids, XSTATS_MAX);
1390
1391                         rte_event_dev_xstats_get(evdev, mode, j, ids,
1392                                                  values, XSTATS_MAX);
1393                 }
1394         }
1395
1396         cleanup(t);
1397         return 0;
1398 fail:
1399         cleanup(t);
1400         return -1;
1401 }
1402
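/* xstats_id_reset_tests
 * Enqueue NPKTS packets, then walk the device-level xstats by name:
 * verify each stat's id and value, reset it individually, and confirm it
 * reads back as zero.
 */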
1403 static int
1404 xstats_id_reset_tests(struct test *t)
1405 {
1406         const int wrk_enq = 2;
1407         int err;
1408
1409         /* Create instance with 4 ports */
1410         if (init(t, 1, 4) < 0 ||
1411                         create_ports(t, 4) < 0 ||
1412                         create_atomic_qids(t, 1) < 0) {
1413                 printf("%d: Error initializing device\n", __LINE__);
1414                 return -1;
1415         }
1416
1417         /* CQ mapping to QID */
1418         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1419         if (err != 1) {
1420                 printf("%d: error mapping lb qid\n", __LINE__);
1421                 goto fail;
1422         }
1423
1424         if (rte_event_dev_start(evdev) < 0) {
1425                 printf("%d: Error with start call\n", __LINE__);
1426                 goto fail;
1427         }
1428
1429 #define XSTATS_MAX 1024
1430         int ret;
1431         uint32_t i;
1432         uint32_t ids[XSTATS_MAX];
1433         uint64_t values[XSTATS_MAX];
1434         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1435
1436         for (i = 0; i < XSTATS_MAX; i++)
1437                 ids[i] = i;
1438
1439 #define NUM_DEV_STATS 6
1440         /* Device names / values */
1441         int num_stats = rte_event_dev_xstats_names_get(evdev,
1442                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1443                                         0, xstats_names, ids, XSTATS_MAX);
1444         if (num_stats != NUM_DEV_STATS) {
1445                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1446                                 NUM_DEV_STATS, num_stats);
1447                 goto fail;
1448         }
1449         ret = rte_event_dev_xstats_get(evdev,
1450                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1451                                         0, ids, values, num_stats);
1452         if (ret != NUM_DEV_STATS) {
1453                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1454                                 NUM_DEV_STATS, ret);
1455                 goto fail;
1456         }
1457
1458 #define NPKTS 7
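        /* Enqueue a handful of NEW events so the device-level counters
         * checked below hold known non-zero values before being reset.
         */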
1459         for (i = 0; i < NPKTS; i++) {
1460                 struct rte_event ev;
1461                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1462                 if (!arp) {
1463                         printf("%d: gen of pkt failed\n", __LINE__);
1464                         goto fail;
1465                 }
1466                 ev.queue_id = t->qid[i];
1467                 ev.op = RTE_EVENT_OP_NEW;
1468                 ev.mbuf = arp;
1469                 arp->seqn = i;
1470
1471                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1472                 if (err != 1) {
1473                         printf("%d: Failed to enqueue\n", __LINE__);
1474                         goto fail;
1475                 }
1476         }
1477
1478         rte_event_schedule(evdev);
1479
1480         static const char * const dev_names[] = {
1481                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1482                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1483         };
1484         uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
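        /* For each device stat: look it up by name, check both its id and
         * value, reset that one stat, then read it back to confirm it is
         * now zero.
         */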
1485         for (i = 0; (int)i < ret; i++) {
1486                 unsigned int id;
1487                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1488                                                                 dev_names[i],
1489                                                                 &id);
1490                 if (id != i) {
1491                         printf("%d: %s id incorrect, expected %d got %d\n",
1492                                         __LINE__, dev_names[i], i, id);
1493                         goto fail;
1494                 }
1495                 if (val != dev_expected[i]) {
1496                         printf("%d: %s value incorrect, expected %"
1497                                 PRIu64" got %"PRIu64"\n", __LINE__,
1498                                 dev_names[i], dev_expected[i], val);
1499                         goto fail;
1500                 }
1501                 /* reset to zero */
1502                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1503                                                 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1504                                                 &id,
1505                                                 1);
1506                 if (reset_ret) {
1507                         printf("%d: failed to reset successfully\n", __LINE__);
1508                         goto fail;
1509                 }
1510                 dev_expected[i] = 0;
1511                 /* check value again */
1512                 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1513                 if (val != dev_expected[i]) {
1514                         printf("%d: %s value incorrect, expected %"PRIu64
1515                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1516                                 dev_expected[i], val);
1517                         goto fail;
1518                 }
1519         }
1520
1521 /* 48 is the stat offset from the start of the device's whole xstats.
1522  * This WILL break every time we add a statistic to a port
1523  * or the device, but there is no other way to test
1524  */
1525 #define PORT_OFF 48
1526 /* num stats for the tested port. CQ size adds more stats to a port */
1527 #define NUM_PORT_STATS 21
1528 /* the port to test. */
1529 #define PORT 2
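/* Port 2 (wrk_enq above) is the only port linked to the QID, so it should
 * be the port holding the NPKTS inflight events checked below.
 */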
1530         num_stats = rte_event_dev_xstats_names_get(evdev,
1531                                         RTE_EVENT_DEV_XSTATS_PORT, PORT,
1532                                         xstats_names, ids, XSTATS_MAX);
1533         if (num_stats != NUM_PORT_STATS) {
1534                 printf("%d: expected %d stats, got return %d\n",
1535                         __LINE__, NUM_PORT_STATS, num_stats);
1536                 goto fail;
1537         }
1538         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1539                                         ids, values, num_stats);
1540
1541         if (ret != NUM_PORT_STATS) {
1542                 printf("%d: expected %d stats, got return %d\n",
1543                                 __LINE__, NUM_PORT_STATS, ret);
1544                 goto fail;
1545         }
1546         static const char * const port_names[] = {
1547                 "port_2_rx",
1548                 "port_2_tx",
1549                 "port_2_drop",
1550                 "port_2_inflight",
1551                 "port_2_avg_pkt_cycles",
1552                 "port_2_credits",
1553                 "port_2_rx_ring_used",
1554                 "port_2_rx_ring_free",
1555                 "port_2_cq_ring_used",
1556                 "port_2_cq_ring_free",
1557                 "port_2_dequeue_calls",
1558                 "port_2_dequeues_returning_0",
1559                 "port_2_dequeues_returning_1-4",
1560                 "port_2_dequeues_returning_5-8",
1561                 "port_2_dequeues_returning_9-12",
1562                 "port_2_dequeues_returning_13-16",
1563                 "port_2_dequeues_returning_17-20",
1564                 "port_2_dequeues_returning_21-24",
1565                 "port_2_dequeues_returning_25-28",
1566                 "port_2_dequeues_returning_29-32",
1567                 "port_2_dequeues_returning_33-36",
1568         };
1569         uint64_t port_expected[] = {
1570                 0, /* rx */
1571                 NPKTS, /* tx */
1572                 0, /* drop */
1573                 NPKTS, /* inflight */
1574                 0, /* avg pkt cycles */
1575                 0, /* credits */
1576                 0, /* rx ring used */
1577                 4096, /* rx ring free */
1578                 NPKTS,  /* cq ring used */
1579                 25, /* cq ring free */
1580                 0, /* dequeue zero calls */
1581                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1582                 0, 0, 0, 0, 0,
1583         };
1584         uint64_t port_expected_zero[] = {
1585                 0, /* rx */
1586                 0, /* tx */
1587                 0, /* drop */
1588                 NPKTS, /* inflight */
1589                 0, /* avg pkt cycles */
1590                 0, /* credits */
1591                 0, /* rx ring used */
1592                 4096, /* rx ring free */
1593                 NPKTS,  /* cq ring used */
1594                 25, /* cq ring free */
1595                 0, /* dequeue zero calls */
1596                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1597                 0, 0, 0, 0, 0,
1598         };
1599         if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1600                         RTE_DIM(port_names) != NUM_PORT_STATS) {
1601                 printf("%d: port array of wrong size\n", __LINE__);
1602                 goto fail;
1603         }
1604
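        /* Same check/reset cycle as the device stats above, but for the
         * port stats, whose ids start at PORT_OFF in the full xstats list.
         */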
1605         int failed = 0;
1606         for (i = 0; (int)i < ret; i++) {
1607                 unsigned int id;
1608                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1609                                                                 port_names[i],
1610                                                                 &id);
1611                 if (id != i + PORT_OFF) {
1612                         printf("%d: %s id incorrect, expected %d got %d\n",
1613                                         __LINE__, port_names[i], i+PORT_OFF,
1614                                         id);
1615                         failed = 1;
1616                 }
1617                 if (val != port_expected[i]) {
1618                         printf("%d: %s value incorrect, expected %"PRIu64
1619                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1620                                 port_expected[i], val);
1621                         failed = 1;
1622                 }
1623                 /* reset to zero */
1624                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1625                                                 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1626                                                 &id,
1627                                                 1);
1628                 if (reset_ret) {
1629                         printf("%d: failed to reset successfully\n", __LINE__);
1630                         failed = 1;
1631                 }
1632                 /* check value again */
1633                 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1634                 if (val != port_expected_zero[i]) {
1635                         printf("%d: %s value incorrect, expected %"PRIu64
1636                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1637                                 port_expected_zero[i], val);
1638                         failed = 1;
1639                 }
1640         }
1641         if (failed)
1642                 goto fail;
1643
1644 /* num queue stats */
1645 #define NUM_Q_STATS 17
1646 /* queue offset from the start of the device's whole xstats.
1647  * This will break every time we add a statistic to a device/port/queue
1648  */
1649 #define QUEUE_OFF 90
1650         const uint32_t queue = 0;
1651         num_stats = rte_event_dev_xstats_names_get(evdev,
1652                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1653                                         xstats_names, ids, XSTATS_MAX);
1654         if (num_stats != NUM_Q_STATS) {
1655                 printf("%d: expected %d stats, got return %d\n",
1656                         __LINE__, NUM_Q_STATS, num_stats);
1657                 goto fail;
1658         }
1659         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1660                                         queue, ids, values, num_stats);
1661         if (ret != NUM_Q_STATS) {
1662                 printf("%d: expected %d stats, got %d\n", __LINE__, NUM_Q_STATS, ret);
1663                 goto fail;
1664         }
1665         static const char * const queue_names[] = {
1666                 "qid_0_rx",
1667                 "qid_0_tx",
1668                 "qid_0_drop",
1669                 "qid_0_inflight",
1670                 "qid_0_iq_size",
1671                 "qid_0_iq_0_used",
1672                 "qid_0_iq_1_used",
1673                 "qid_0_iq_2_used",
1674                 "qid_0_iq_3_used",
1675                 "qid_0_port_0_pinned_flows",
1676                 "qid_0_port_0_packets",
1677                 "qid_0_port_1_pinned_flows",
1678                 "qid_0_port_1_packets",
1679                 "qid_0_port_2_pinned_flows",
1680                 "qid_0_port_2_packets",
1681                 "qid_0_port_3_pinned_flows",
1682                 "qid_0_port_3_packets",
1683         };
1684         uint64_t queue_expected[] = {
1685                 7, /* rx */
1686                 7, /* tx */
1687                 0, /* drop */
1688                 7, /* inflight */
1689                 512, /* iq size */
1690                 0, /* iq 0 used */
1691                 0, /* iq 1 used */
1692                 0, /* iq 2 used */
1693                 0, /* iq 3 used */
1694                 /* QID-to-Port: pinned_flows, packets */
1695                 0, 0,
1696                 0, 0,
1697                 1, 7,
1698                 0, 0,
1699         };
1700         uint64_t queue_expected_zero[] = {
1701                 0, /* rx */
1702                 0, /* tx */
1703                 0, /* drop */
1704                 7, /* inflight */
1705                 512, /* iq size */
1706                 0, /* iq 0 used */
1707                 0, /* iq 1 used */
1708                 0, /* iq 2 used */
1709                 0, /* iq 3 used */
1710                 /* QID-to-Port: pinned_flows, packets */
1711                 0, 0,
1712                 0, 0,
1713                 1, 0,
1714                 0, 0,
1715         };
1716         if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1717                         RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1718                         RTE_DIM(queue_names) != NUM_Q_STATS) {
1719                 printf("%d: queue arrays of wrong size\n", __LINE__);
1720                 goto fail;
1721         }
1722
1723         failed = 0;
1724         for (i = 0; (int)i < ret; i++) {
1725                 unsigned int id;
1726                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1727                                                                 queue_names[i],
1728                                                                 &id);
1729                 if (id != i + QUEUE_OFF) {
1730                         printf("%d: %s id incorrect, expected %d got %d\n",
1731                                         __LINE__, queue_names[i], i+QUEUE_OFF,
1732                                         id);
1733                         failed = 1;
1734                 }
1735                 if (val != queue_expected[i]) {
1736                         printf("%d: %s value incorrect, expected %"PRIu64
1737                                 " got %"PRIu64"\n", __LINE__,
1738                                 queue_names[i], queue_expected[i], val);
1739                         failed = 1;
1740                 }
1741                 /* reset to zero */
1742                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1743                                                 RTE_EVENT_DEV_XSTATS_QUEUE,
1744                                                 queue, &id, 1);
1745                 if (reset_ret) {
1746                         printf("%d: failed to reset successfully\n", __LINE__);
1747                         failed = 1;
1748                 }
1749                 /* check value again */
1750                 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1751                                                         0);
1752                 if (val != queue_expected_zero[i]) {
1753                         printf("%d: %s value incorrect, expected %"PRIu64
1754                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1755                                 queue_expected_zero[i], val);
1756                         failed = 1;
1757                 }
1758         }
1759
1760         if (failed)
1761                 goto fail;
1762
1763         cleanup(t);
1764         return 0;
1765 fail:
1766         cleanup(t);
1767         return -1;
1768 }
1769
1770 static int
1771 ordered_reconfigure(struct test *t)
1772 {
1773         if (init(t, 1, 1) < 0 ||
1774                         create_ports(t, 1) < 0) {
1775                 printf("%d: Error initializing device\n", __LINE__);
1776                 return -1;
1777         }
1778
1779         const struct rte_event_queue_conf conf = {
1780                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
1781                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1782                         .nb_atomic_flows = 1024,
1783                         .nb_atomic_order_sequences = 1024,
1784         };
1785
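        /* Set up the same queue id twice: the second call checks that an
         * ordered queue can be reconfigured before the device is started.
         */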
1786         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1787                 printf("%d: error creating qid\n", __LINE__);
1788                 goto failed;
1789         }
1790
1791         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1792                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1793                 goto failed;
1794         }
1795
1796         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1797         if (rte_event_dev_start(evdev) < 0) {
1798                 printf("%d: Error with start call\n", __LINE__);
1799                 return -1;
1800         }
1801
1802         cleanup(t);
1803         return 0;
1804 failed:
1805         cleanup(t);
1806         return -1;
1807 }
1808
1809 static int
1810 qid_priorities(struct test *t)
1811 {
1812         /* Test works by having a CQ with enough empty space for all packets,
1813          * and enqueueing 3 packets to 3 QIDs. They must return based on the
1814          * priority of the QID, not the ingress order, to pass the test
1815          */
1816         unsigned int i;
1817         /* Create instance with 1 port and 3 qids */
1818         if (init(t, 3, 1) < 0 ||
1819                         create_ports(t, 1) < 0) {
1820                 printf("%d: Error initializing device\n", __LINE__);
1821                 return -1;
1822         }
1823
1824         for (i = 0; i < 3; i++) {
1825                 /* Create QID */
1826                 const struct rte_event_queue_conf conf = {
1827                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1828                         /* increase priority (0 == highest), as we go */
1829                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1830                         .nb_atomic_flows = 1024,
1831                         .nb_atomic_order_sequences = 1024,
1832                 };
1833
1834                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1835                         printf("%d: error creating qid %d\n", __LINE__, i);
1836                         return -1;
1837                 }
1838                 t->qid[i] = i;
1839         }
1840         t->nb_qids = i;
1841         /* map all QIDs to port */
1842         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1843
1844         if (rte_event_dev_start(evdev) < 0) {
1845                 printf("%d: Error with start call\n", __LINE__);
1846                 return -1;
1847         }
1848
1849         /* enqueue 3 packets, setting seqn and QID to check priority */
1850         for (i = 0; i < 3; i++) {
1851                 struct rte_event ev;
1852                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1853                 if (!arp) {
1854                         printf("%d: gen of pkt failed\n", __LINE__);
1855                         return -1;
1856                 }
1857                 ev.queue_id = t->qid[i];
1858                 ev.op = RTE_EVENT_OP_NEW;
1859                 ev.mbuf = arp;
1860                 arp->seqn = i;
1861
1862                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1863                 if (err != 1) {
1864                         printf("%d: Failed to enqueue\n", __LINE__);
1865                         return -1;
1866                 }
1867         }
1868
1869         rte_event_schedule(evdev);
1870
1871         /* dequeue packets, verify priority was upheld */
1872         struct rte_event ev[32];
1873         uint32_t deq_pkts =
1874                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1875         if (deq_pkts != 3) {
1876                 printf("%d: failed to deq packets\n", __LINE__);
1877                 rte_event_dev_dump(evdev, stdout);
1878                 return -1;
1879         }
1880         for (i = 0; i < 3; i++) {
1881                 if (ev[i].mbuf->seqn != 2-i) {
1882                         printf("%d: qid priority test: seqn %d incorrectly prioritized\n",
1883                                         __LINE__, i);
1884                         return -1;
1885                 }
1886         }
1887
1888         cleanup(t);
1889         return 0;
1890 }
1891
1892 static int
1893 load_balancing(struct test *t)
1894 {
1895         const int rx_enq = 0;
1896         int err;
1897         uint32_t i;
1898
1899         if (init(t, 1, 4) < 0 ||
1900                         create_ports(t, 4) < 0 ||
1901                         create_atomic_qids(t, 1) < 0) {
1902                 printf("%d: Error initializing device\n", __LINE__);
1903                 return -1;
1904         }
1905
1906         for (i = 0; i < 3; i++) {
1907                 /* map port 1 - 3 inclusive */
1908                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1909                                 NULL, 1) != 1) {
1910                         printf("%d: error mapping qid to port %d\n",
1911                                         __LINE__, i);
1912                         return -1;
1913                 }
1914         }
1915
1916         if (rte_event_dev_start(evdev) < 0) {
1917                 printf("%d: Error with start call\n", __LINE__);
1918                 return -1;
1919         }
1920
1921         /************** FORWARD ****************/
1922         /*
1923          * Create a set of flows that test the load-balancing operation of the
1924          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1925          * with a new flow, which should be sent to the 3rd mapped CQ
1926          */
1927         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
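        /* Flow 0 appears 4 times, flow 1 twice and flow 2 three times, so
         * with each flow pinned to its own CQ the inflight counts checked
         * below should be 4, 2 and 3 respectively.
         */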
1928
1929         for (i = 0; i < RTE_DIM(flows); i++) {
1930                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1931                 if (!arp) {
1932                         printf("%d: gen of pkt failed\n", __LINE__);
1933                         return -1;
1934                 }
1935
1936                 struct rte_event ev = {
1937                                 .op = RTE_EVENT_OP_NEW,
1938                                 .queue_id = t->qid[0],
1939                                 .flow_id = flows[i],
1940                                 .mbuf = arp,
1941                 };
1942                 /* generate pkt and enqueue */
1943                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1944                 if (err != 1) {
1945                         printf("%d: Failed to enqueue\n", __LINE__);
1946                         return -1;
1947                 }
1948         }
1949
1950         rte_event_schedule(evdev);
1951
1952         struct test_event_dev_stats stats;
1953         err = test_event_dev_stats_get(evdev, &stats);
1954         if (err) {
1955                 printf("%d: failed to get stats\n", __LINE__);
1956                 return -1;
1957         }
1958
1959         if (stats.port_inflight[1] != 4) {
1960                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1961                                 __func__);
1962                 return -1;
1963         }
1964         if (stats.port_inflight[2] != 2) {
1965                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1966                                 __func__);
1967                 return -1;
1968         }
1969         if (stats.port_inflight[3] != 3) {
1970                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
1971                                 __func__);
1972                 return -1;
1973         }
1974
1975         cleanup(t);
1976         return 0;
1977 }
1978
1979 static int
1980 load_balancing_history(struct test *t)
1981 {
1982         struct test_event_dev_stats stats = {0};
1983         const int rx_enq = 0;
1984         int err;
1985         uint32_t i;
1986
1987         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
1988         if (init(t, 1, 4) < 0 ||
1989                         create_ports(t, 4) < 0 ||
1990                         create_atomic_qids(t, 1) < 0)
1991                 return -1;
1992
1993         /* CQ mapping to QID */
1994         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
1995                 printf("%d: error mapping port 1 qid\n", __LINE__);
1996                 return -1;
1997         }
1998         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
1999                 printf("%d: error mapping port 2 qid\n", __LINE__);
2000                 return -1;
2001         }
2002         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2003                 printf("%d: error mapping port 3 qid\n", __LINE__);
2004                 return -1;
2005         }
2006         if (rte_event_dev_start(evdev) < 0) {
2007                 printf("%d: Error with start call\n", __LINE__);
2008                 return -1;
2009         }
2010
2011         /*
2012          * Create a set of flows that test the load-balancing operation of the
2013          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2014          * the packet from CQ 0, send in a new set of flows. Ensure that:
2015          *  1. The new flow 3 gets into the empty CQ0
2016          *  2. packets for existing flow gets added into CQ1
2017          *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2018          *     more outstanding pkts
2019          *
2020          *  This test makes sure that when a flow ends (i.e. all packets
2021          *  have been completed for that flow), that the flow can be moved
2022          *  to a different CQ when new packets come in for that flow.
2023          */
2024         static uint32_t flows1[] = {0, 1, 1, 2};
2025
2026         for (i = 0; i < RTE_DIM(flows1); i++) {
2027                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2028                 struct rte_event ev = {
2029                                 .flow_id = flows1[i],
2030                                 .op = RTE_EVENT_OP_NEW,
2031                                 .queue_id = t->qid[0],
2032                                 .event_type = RTE_EVENT_TYPE_CPU,
2033                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2034                                 .mbuf = arp
2035                 };
2036
2037                 if (!arp) {
2038                         printf("%d: gen of pkt failed\n", __LINE__);
2039                         return -1;
2040                 }
2041                 arp->hash.rss = flows1[i];
2042                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2043                 if (err != 1) {
2044                         printf("%d: Failed to enqueue\n", __LINE__);
2045                         return -1;
2046                 }
2047         }
2048
2049         /* call the scheduler */
2050         rte_event_schedule(evdev);
2051
2052         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2053         struct rte_event ev;
2054         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2055                 printf("%d: failed to dequeue\n", __LINE__);
2056                 return -1;
2057         }
2058         if (ev.mbuf->hash.rss != flows1[0]) {
2059                 printf("%d: unexpected flow received\n", __LINE__);
2060                 return -1;
2061         }
2062
2063         /* drop the flow 0 packet from port 1 */
2064         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2065
2066         /* call the scheduler */
2067         rte_event_schedule(evdev);
2068
2069         /*
2070          * Set up the next set of flows, first a new flow to fill up
2071          * CQ 0, so that the next flow 0 packet should go to CQ2
2072          */
2073         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2074
2075         for (i = 0; i < RTE_DIM(flows2); i++) {
2076                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2077                 struct rte_event ev = {
2078                                 .flow_id = flows2[i],
2079                                 .op = RTE_EVENT_OP_NEW,
2080                                 .queue_id = t->qid[0],
2081                                 .event_type = RTE_EVENT_TYPE_CPU,
2082                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2083                                 .mbuf = arp
2084                 };
2085
2086                 if (!arp) {
2087                         printf("%d: gen of pkt failed\n", __LINE__);
2088                         return -1;
2089                 }
2090                 arp->hash.rss = flows2[i];
2091
2092                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2093                 if (err != 1) {
2094                         printf("%d: Failed to enqueue\n", __LINE__);
2095                         return -1;
2096                 }
2097         }
2098
2099         /* schedule */
2100         rte_event_schedule(evdev);
2101
2102         err = test_event_dev_stats_get(evdev, &stats);
2103         if (err) {
2104                 printf("%d: failed to get stats\n", __LINE__);
2105                 return -1;
2106         }
2107
2108         /*
2109          * Now check the resulting inflights on each port.
2110          */
2111         if (stats.port_inflight[1] != 3) {
2112                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2113                                 __func__);
2114                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2115                                 (unsigned int)stats.port_inflight[1],
2116                                 (unsigned int)stats.port_inflight[2],
2117                                 (unsigned int)stats.port_inflight[3]);
2118                 return -1;
2119         }
2120         if (stats.port_inflight[2] != 4) {
2121                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2122                                 __func__);
2123                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2124                                 (unsigned int)stats.port_inflight[1],
2125                                 (unsigned int)stats.port_inflight[2],
2126                                 (unsigned int)stats.port_inflight[3]);
2127                 return -1;
2128         }
2129         if (stats.port_inflight[3] != 2) {
2130                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2131                                 __func__);
2132                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2133                                 (unsigned int)stats.port_inflight[1],
2134                                 (unsigned int)stats.port_inflight[2],
2135                                 (unsigned int)stats.port_inflight[3]);
2136                 return -1;
2137         }
2138
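        /* Drain and release everything still held on the worker ports so
         * cleanup() can stop the device with no inflight events left.
         */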
2139         for (i = 1; i <= 3; i++) {
2140                 struct rte_event ev;
2141                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2142                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2143         }
2144         rte_event_schedule(evdev);
2145
2146         cleanup(t);
2147         return 0;
2148 }
2149
2150 static int
2151 invalid_qid(struct test *t)
2152 {
2153         struct test_event_dev_stats stats;
2154         const int rx_enq = 0;
2155         int err;
2156         uint32_t i;
2157
2158         if (init(t, 1, 4) < 0 ||
2159                         create_ports(t, 4) < 0 ||
2160                         create_atomic_qids(t, 1) < 0) {
2161                 printf("%d: Error initializing device\n", __LINE__);
2162                 return -1;
2163         }
2164
2165         /* CQ mapping to QID */
2166         for (i = 0; i < 4; i++) {
2167                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2168                                 NULL, 1);
2169                 if (err != 1) {
2170                         printf("%d: error mapping port 1 qid\n", __LINE__);
2171                         return -1;
2172                 }
2173         }
2174
2175         if (rte_event_dev_start(evdev) < 0) {
2176                 printf("%d: Error with start call\n", __LINE__);
2177                 return -1;
2178         }
2179
2180         /*
2181          * Send in a packet with an invalid qid to the scheduler.
2182          * We should see the packet enqueued OK, but the inflights for
2183          * that packet should not be incremented, and the rx_dropped
2184          * should be incremented.
2185          */
2186         static uint32_t flows1[] = {20};
2187
2188         for (i = 0; i < RTE_DIM(flows1); i++) {
2189                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2190                 if (!arp) {
2191                         printf("%d: gen of pkt failed\n", __LINE__);
2192                         return -1;
2193                 }
2194
2195                 struct rte_event ev = {
2196                                 .op = RTE_EVENT_OP_NEW,
2197                                 .queue_id = t->qid[0] + flows1[i],
2198                                 .flow_id = i,
2199                                 .mbuf = arp,
2200                 };
2201                 /* generate pkt and enqueue */
2202                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2203                 if (err != 1) {
2204                         printf("%d: Failed to enqueue\n", __LINE__);
2205                         return -1;
2206                 }
2207         }
2208
2209         /* call the scheduler */
2210         rte_event_schedule(evdev);
2211
2212         err = test_event_dev_stats_get(evdev, &stats);
2213         if (err) {
2214                 printf("%d: failed to get stats\n", __LINE__);
2215                 return -1;
2216         }
2217
2218         /*
2219          * Now check the resulting inflights on the port, and the rx_dropped.
2220          */
2221         if (stats.port_inflight[0] != 0) {
2222                 printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2223                                 __func__);
2224                 rte_event_dev_dump(evdev, stdout);
2225                 return -1;
2226         }
2227         if (stats.port_rx_dropped[0] != 1) {
2228                 printf("%d:%s: port 0 drops\n", __LINE__, __func__);
2229                 rte_event_dev_dump(evdev, stdout);
2230                 return -1;
2231         }
2232         /* each packet drop should only be counted in one place - port or dev */
2233         if (stats.rx_dropped != 0) {
2234                 printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2235                                 __func__);
2236                 rte_event_dev_dump(evdev, stdout);
2237                 return -1;
2238         }
2239
2240         cleanup(t);
2241         return 0;
2242 }
2243
2244 static int
2245 single_packet(struct test *t)
2246 {
2247         const uint32_t MAGIC_SEQN = 7321;
2248         struct rte_event ev;
2249         struct test_event_dev_stats stats;
2250         const int rx_enq = 0;
2251         const int wrk_enq = 2;
2252         int err;
2253
2254         /* Create instance with 4 ports */
2255         if (init(t, 1, 4) < 0 ||
2256                         create_ports(t, 4) < 0 ||
2257                         create_atomic_qids(t, 1) < 0) {
2258                 printf("%d: Error initializing device\n", __LINE__);
2259                 return -1;
2260         }
2261
2262         /* CQ mapping to QID */
2263         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2264         if (err != 1) {
2265                 printf("%d: error mapping lb qid\n", __LINE__);
2266                 cleanup(t);
2267                 return -1;
2268         }
2269
2270         if (rte_event_dev_start(evdev) < 0) {
2271                 printf("%d: Error with start call\n", __LINE__);
2272                 return -1;
2273         }
2274
2275         /************** Gen pkt and enqueue ****************/
2276         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2277         if (!arp) {
2278                 printf("%d: gen of pkt failed\n", __LINE__);
2279                 return -1;
2280         }
2281
2282         ev.op = RTE_EVENT_OP_NEW;
2283         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2284         ev.mbuf = arp;
2285         ev.queue_id = 0;
2286         ev.flow_id = 3;
2287         arp->seqn = MAGIC_SEQN;
2288
2289         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2290         if (err != 1) {
2291                 printf("%d: Failed to enqueue\n", __LINE__);
2292                 return -1;
2293         }
2294
2295         rte_event_schedule(evdev);
2296
2297         err = test_event_dev_stats_get(evdev, &stats);
2298         if (err) {
2299                 printf("%d: failed to get stats\n", __LINE__);
2300                 return -1;
2301         }
2302
2303         if (stats.rx_pkts != 1 ||
2304                         stats.tx_pkts != 1 ||
2305                         stats.port_inflight[wrk_enq] != 1) {
2306                 printf("%d: Sched core didn't handle pkt as expected\n",
2307                                 __LINE__);
2308                 rte_event_dev_dump(evdev, stdout);
2309                 return -1;
2310         }
2311
2312         uint32_t deq_pkts;
2313
2314         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2315         if (deq_pkts < 1) {
2316                 printf("%d: Failed to deq\n", __LINE__);
2317                 return -1;
2318         }
2319
2320         err = test_event_dev_stats_get(evdev, &stats);
2321         if (err) {
2322                 printf("%d: failed to get stats\n", __LINE__);
2323                 return -1;
2324         }
2325
2326         err = test_event_dev_stats_get(evdev, &stats);
2327         if (ev.mbuf->seqn != MAGIC_SEQN) {
2328                 printf("%d: magic sequence number not dequeued\n", __LINE__);
2329                 return -1;
2330         }
2331
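        /* Free the mbuf and send a RELEASE op back on the worker port so
         * the scheduler drops the port's inflight count back to zero.
         */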
2332         rte_pktmbuf_free(ev.mbuf);
2333         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2334         if (err != 1) {
2335                 printf("%d: Failed to enqueue\n", __LINE__);
2336                 return -1;
2337         }
2338         rte_event_schedule(evdev);
2339
2340         err = test_event_dev_stats_get(evdev, &stats);
2341         if (stats.port_inflight[wrk_enq] != 0) {
2342                 printf("%d: port inflight not correct\n", __LINE__);
2343                 return -1;
2344         }
2345
2346         cleanup(t);
2347         return 0;
2348 }
2349
2350 static int
2351 inflight_counts(struct test *t)
2352 {
2353         struct rte_event ev;
2354         struct test_event_dev_stats stats;
2355         const int rx_enq = 0;
2356         const int p1 = 1;
2357         const int p2 = 2;
2358         int err;
2359         int i;
2360
2361         /* Create instance with 3 ports and 2 qids */
2362         if (init(t, 2, 3) < 0 ||
2363                         create_ports(t, 3) < 0 ||
2364                         create_atomic_qids(t, 2) < 0) {
2365                 printf("%d: Error initializing device\n", __LINE__);
2366                 return -1;
2367         }
2368
2369         /* CQ mapping to QID */
2370         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2371         if (err != 1) {
2372                 printf("%d: error mapping lb qid\n", __LINE__);
2373                 cleanup(t);
2374                 return -1;
2375         }
2376         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2377         if (err != 1) {
2378                 printf("%d: error mapping lb qid\n", __LINE__);
2379                 cleanup(t);
2380                 return -1;
2381         }
2382
2383         if (rte_event_dev_start(evdev) < 0) {
2384                 printf("%d: Error with start call\n", __LINE__);
2385                 return -1;
2386         }
2387
2388         /************** FORWARD ****************/
2389 #define QID1_NUM 5
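        /* Enqueue QID1_NUM events to qid[0] (served by port p1) and
         * QID2_NUM events to qid[1] (served by port p2).
         */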
2390         for (i = 0; i < QID1_NUM; i++) {
2391                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2392
2393                 if (!arp) {
2394                         printf("%d: gen of pkt failed\n", __LINE__);
2395                         goto err;
2396                 }
2397
2398                 ev.queue_id =  t->qid[0];
2399                 ev.op = RTE_EVENT_OP_NEW;
2400                 ev.mbuf = arp;
2401                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2402                 if (err != 1) {
2403                         printf("%d: Failed to enqueue\n", __LINE__);
2404                         goto err;
2405                 }
2406         }
2407 #define QID2_NUM 3
2408         for (i = 0; i < QID2_NUM; i++) {
2409                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2410
2411                 if (!arp) {
2412                         printf("%d: gen of pkt failed\n", __LINE__);
2413                         goto err;
2414                 }
2415                 ev.queue_id =  t->qid[1];
2416                 ev.op = RTE_EVENT_OP_NEW;
2417                 ev.mbuf = arp;
2418                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2419                 if (err != 1) {
2420                         printf("%d: Failed to enqueue\n", __LINE__);
2421                         goto err;
2422                 }
2423         }
2424
2425         /* schedule */
2426         rte_event_schedule(evdev);
2427
2428         err = test_event_dev_stats_get(evdev, &stats);
2429         if (err) {
2430                 printf("%d: failed to get stats\n", __LINE__);
2431                 goto err;
2432         }
2433
2434         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2435                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
2436                 printf("%d: Sched core didn't handle pkt as expected\n",
2437                                 __LINE__);
2438                 goto err;
2439         }
2440
2441         if (stats.port_inflight[p1] != QID1_NUM) {
2442                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2443                                 __func__);
2444                 goto err;
2445         }
2446         if (stats.port_inflight[p2] != QID2_NUM) {
2447                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2448                                 __func__);
2449                 goto err;
2450         }
2451
2452         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2453         /* port 1 */
2454         struct rte_event events[QID1_NUM + QID2_NUM];
2455         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2456                         RTE_DIM(events), 0);
2457
2458         if (deq_pkts != QID1_NUM) {
2459                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2460                 goto err;
2461         }
2462         err = test_event_dev_stats_get(evdev, &stats);
2463         if (stats.port_inflight[p1] != QID1_NUM) {
2464                 printf("%d: port 1 inflight changed after DEQ\n",
2465                                 __LINE__);
2466                 goto err;
2467         }
2468         for (i = 0; i < QID1_NUM; i++) {
2469                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2470                                 1);
2471                 if (err != 1) {
2472                         printf("%d: %s rte enqueue of inf release failed\n",
2473                                 __LINE__, __func__);
2474                         goto err;
2475                 }
2476         }
2477
2478         /*
2479          * As the scheduler core decrements inflights, it needs to run to
2480          * process packets to act on the drop messages
2481          */
2482         rte_event_schedule(evdev);
2483
2484         err = test_event_dev_stats_get(evdev, &stats);
2485         if (stats.port_inflight[p1] != 0) {
2486                 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2487                 goto err;
2488         }
2489
2490         /* port2 */
2491         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2492                         RTE_DIM(events), 0);
2493         if (deq_pkts != QID2_NUM) {
2494                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2495                 goto err;
2496         }
2497         err = test_event_dev_stats_get(evdev, &stats);
2498         if (stats.port_inflight[p2] != QID2_NUM) {
2499                 printf("%d: port 2 inflight changed after DEQ\n",
2500                                 __LINE__);
2501                 goto err;
2502         }
2503         for (i = 0; i < QID2_NUM; i++) {
2504                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2505                                 1);
2506                 if (err != 1) {
2507                         printf("%d: %s rte enqueue of inf release failed\n",
2508                                 __LINE__, __func__);
2509                         goto err;
2510                 }
2511         }
2512
2513         /*
2514          * As the scheduler core decrements inflights, it needs to run to
2515          * process packets to act on the drop messages
2516          */
2517         rte_event_schedule(evdev);
2518
2519         err = test_event_dev_stats_get(evdev, &stats);
2520         if (stats.port_inflight[p2] != 0) {
2521                 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2522                 goto err;
2523         }
2524         cleanup(t);
2525         return 0;
2526
2527 err:
2528         rte_event_dev_dump(evdev, stdout);
2529         cleanup(t);
2530         return -1;
2531 }
2532
2533 static int
2534 parallel_basic(struct test *t, int check_order)
2535 {
2536         const uint8_t rx_port = 0;
2537         const uint8_t w1_port = 1;
2538         const uint8_t w3_port = 3;
2539         const uint8_t tx_port = 4;
2540         int err;
2541         int i;
2542         uint32_t deq_pkts, j;
2543         struct rte_mbuf *mbufs[3];
2544         struct rte_mbuf *mbufs_out[3] = { 0 };
2545         const uint32_t MAGIC_SEQN = 1234;
2546
2547         /* Create instance with 5 ports and 2 qids */
2548         if (init(t, 2, tx_port + 1) < 0 ||
2549                         create_ports(t, tx_port + 1) < 0 ||
2550                         (check_order ?  create_ordered_qids(t, 1) :
2551                                 create_unordered_qids(t, 1)) < 0 ||
2552                         create_directed_qids(t, 1, &tx_port)) {
2553                 printf("%d: Error initializing device\n", __LINE__);
2554                 return -1;
2555         }
2556
2557         /*
2558          * CQ mapping to QID
2559          * We need three ports, all mapped to the same ordered qid0. Then we'll
2560          * take a packet out to each port, re-enqueue in reverse order,
2561          * then make sure the reordering has taken place properly when we
2562          * dequeue from the tx_port.
2563          *
2564          * Simplified test setup diagram:
2565          *
2566          * rx_port        w1_port
2567          *        \     /         \
2568          *         qid0 - w2_port - qid1
2569          *              \         /     \
2570          *                w3_port        tx_port
2571          */
2572         /* CQ mapping to QID for LB ports (directed mapped on create) */
2573         for (i = w1_port; i <= w3_port; i++) {
2574                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2575                                 1);
2576                 if (err != 1) {
2577                         printf("%d: error mapping lb qid\n", __LINE__);
2578                         cleanup(t);
2579                         return -1;
2580                 }
2581         }
2582
2583         if (rte_event_dev_start(evdev) < 0) {
2584                 printf("%d: Error with start call\n", __LINE__);
2585                 return -1;
2586         }
2587
2588         /* Enqueue 3 packets to the rx port */
2589         for (i = 0; i < 3; i++) {
2590                 struct rte_event ev;
2591                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2592                 if (!mbufs[i]) {
2593                         printf("%d: gen of pkt failed\n", __LINE__);
2594                         return -1;
2595                 }
2596
2597                 ev.queue_id = t->qid[0];
2598                 ev.op = RTE_EVENT_OP_NEW;
2599                 ev.mbuf = mbufs[i];
2600                 mbufs[i]->seqn = MAGIC_SEQN + i;
2601
2602                 /* generate pkt and enqueue */
2603                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2604                 if (err != 1) {
2605                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2606                                         __LINE__, i, err);
2607                         return -1;
2608                 }
2609         }
2610
2611         rte_event_schedule(evdev);
2612
2613         /* use extra slot to make logic in loops easier */
2614         struct rte_event deq_ev[w3_port + 1];
2615
2616         /* Dequeue the 3 packets, one from each worker port */
2617         for (i = w1_port; i <= w3_port; i++) {
2618                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2619                                 &deq_ev[i], 1, 0);
2620                 if (deq_pkts != 1) {
2621                         printf("%d: Failed to deq\n", __LINE__);
2622                         rte_event_dev_dump(evdev, stdout);
2623                         return -1;
2624                 }
2625         }
2626
2627         /* Enqueue each packet in reverse order, flushing after each one */
2628         for (i = w3_port; i >= w1_port; i--) {
2629
2630                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2631                 deq_ev[i].queue_id = t->qid[1];
2632                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2633                 if (err != 1) {
2634                         printf("%d: Failed to enqueue\n", __LINE__);
2635                         return -1;
2636                 }
2637         }
2638         rte_event_schedule(evdev);
2639
2640         /* dequeue from the tx ports, we should get 3 packets */
2641         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2642                         3, 0);
2643
2644         /* Check to see if we've got all 3 packets */
2645         if (deq_pkts != 3) {
2646                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2647                         __LINE__, deq_pkts, tx_port);
2648                 rte_event_dev_dump(evdev, stdout);
2649                 return -1;
2650         }
2651
2652         /* Check to see if the sequence numbers are in expected order */
2653         if (check_order) {
2654                 for (j = 0 ; j < deq_pkts ; j++) {
2655                         mbufs_out[j] = deq_ev[j].mbuf;
2656                         if (mbufs_out[j]->seqn != MAGIC_SEQN + j) {
2657                                 printf("%d: Incorrect sequence number(%d) from port %d\n",
2658                                         __LINE__, mbufs_out[j]->seqn, tx_port);
2659                                 return -1;
2660                         }
2661                 }
2662         }
2663
2664         /* Destroy the instance */
2665         cleanup(t);
2666         return 0;
2667 }
2668
2669 static int
2670 ordered_basic(struct test *t)
2671 {
2672         return parallel_basic(t, 1);
2673 }
2674
2675 static int
2676 unordered_basic(struct test *t)
2677 {
2678         return parallel_basic(t, 0);
2679 }
2680
2681 static int
2682 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2683 {
2684         const struct rte_event new_ev = {
2685                         .op = RTE_EVENT_OP_NEW
2686                         /* all other fields zero */
2687         };
2688         struct rte_event ev = new_ev;
2689         unsigned int rx_port = 0; /* port we get the first flow on */
2690         char rx_port_used_stat[64];
2691         char rx_port_free_stat[64];
2692         char other_port_used_stat[64];
2693
2694         if (init(t, 1, 2) < 0 ||
2695                         create_ports(t, 2) < 0 ||
2696                         create_atomic_qids(t, 1) < 0) {
2697                 printf("%d: Error initializing device\n", __LINE__);
2698                 return -1;
2699         }
2700         int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2701         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2702                         nb_links != 1) {
2703                 printf("%d: Error linking queue to ports\n", __LINE__);
2704                 goto err;
2705         }
2706         if (rte_event_dev_start(evdev) < 0) {
2707                 printf("%d: Error with start call\n", __LINE__);
2708                 goto err;
2709         }
2710
2711         /* send one packet and see where it goes, port 0 or 1 */
2712         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2713                 printf("%d: Error doing first enqueue\n", __LINE__);
2714                 goto err;
2715         }
2716         rte_event_schedule(evdev);
2717
2718         if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2719                         != 1)
2720                 rx_port = 1;
2721
2722         snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2723                         "port_%u_cq_ring_used", rx_port);
2724         snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2725                         "port_%u_cq_ring_free", rx_port);
2726         snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2727                         "port_%u_cq_ring_used", rx_port ^ 1);
2728         if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2729                         != 1) {
2730                 printf("%d: Error, first event not scheduled\n", __LINE__);
2731                 goto err;
2732         }
2733
2734         /* now fill up the rx port's queue with one flow to cause HOLB */
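        /* All events carry flow id 0, so the atomic QID keeps scheduling
         * them to rx_port until that port's CQ ring has no free space left.
         */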
2735         do {
2736                 ev = new_ev;
2737                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2738                         printf("%d: Error with enqueue\n", __LINE__);
2739                         goto err;
2740                 }
2741                 rte_event_schedule(evdev);
2742         } while (rte_event_dev_xstats_by_name_get(evdev,
2743                                 rx_port_free_stat, NULL) != 0);
2744
2745         /* one more packet, which needs to stay in IQ - i.e. HOLB */
2746         ev = new_ev;
2747         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2748                 printf("%d: Error with enqueue\n", __LINE__);
2749                 goto err;
2750         }
2751         rte_event_schedule(evdev);
2752
2753         /* check that the other port still has an empty CQ */
2754         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2755                         != 0) {
2756                 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2757                 goto err;
2758         }
2759         /* check IQ now has one packet */
2760         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2761                         != 1) {
2762                 printf("%d: Error, QID does not have exactly 1 packet\n",
2763                         __LINE__);
2764                 goto err;
2765         }
2766
2767         /* send another flow, which should pass the other IQ entry */
2768         ev = new_ev;
2769         ev.flow_id = 1;
2770         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2771                 printf("%d: Error with enqueue\n", __LINE__);
2772                 goto err;
2773         }
2774         rte_event_schedule(evdev);
2775
2776         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2777                         != 1) {
2778                 printf("%d: Error, second flow did not pass out first\n",
2779                         __LINE__);
2780                 goto err;
2781         }
2782
2783         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2784                         != 1) {
2785                 printf("%d: Error, QID does not have exactly 1 packet\n",
2786                         __LINE__);
2787                 goto err;
2788         }
2789         cleanup(t);
2790         return 0;
2791 err:
2792         rte_event_dev_dump(evdev, stdout);
2793         cleanup(t);
2794         return -1;
2795 }
2796
2797 static int
2798 worker_loopback_worker_fn(void *arg)
2799 {
2800         struct test *t = arg;
2801         uint8_t port = t->port[1];
2802         int count = 0;
2803         int enqd;
2804
2805         /*
2806          * Takes packets from the input port and then loops them back through
2807          * the Eventdev. Each packet gets looped through QIDs 0-7, 16 times,
2808          * so each packet is scheduled 8*16 = 128 times in total.
2809          */
2810         printf("%d: \tWorker function started\n", __LINE__);
2811         while (count < NUM_PACKETS) {
2812 #define BURST_SIZE 32
2813                 struct rte_event ev[BURST_SIZE];
2814                 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2815                                 BURST_SIZE, 0);
2816                 if (nb_rx == 0) {
2817                         rte_pause();
2818                         continue;
2819                 }
2820
2821                 for (i = 0; i < nb_rx; i++) {
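                             /* forward the event to the next QID; after QID 7 the
                              * event wraps to QID 0 and the pass count in udata64
                              * is bumped below
                              */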
2822                         ev[i].queue_id++;
2823                         if (ev[i].queue_id != 8) {
2824                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2825                                 enqd = rte_event_enqueue_burst(evdev, port,
2826                                                 &ev[i], 1);
2827                                 if (enqd != 1) {
2828                                         printf("%d: Can't enqueue FWD!!\n",
2829                                                         __LINE__);
2830                                         return -1;
2831                                 }
2832                                 continue;
2833                         }
2834
2835                         ev[i].queue_id = 0;
2836                         ev[i].mbuf->udata64++;
2837                         if (ev[i].mbuf->udata64 != 16) {
2838                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2839                                 enqd = rte_event_enqueue_burst(evdev, port,
2840                                                 &ev[i], 1);
2841                                 if (enqd != 1) {
2842                                         printf("%d: Can't enqueue FWD!!\n",
2843                                                         __LINE__);
2844                                         return -1;
2845                                 }
2846                                 continue;
2847                         }
2848                         /* we have hit 16 iterations through system - drop */
2849                         rte_pktmbuf_free(ev[i].mbuf);
2850                         count++;
2851                         ev[i].op = RTE_EVENT_OP_RELEASE;
2852                         enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2853                         if (enqd != 1) {
2854                                 printf("%d: drop enqueue failed\n", __LINE__);
2855                                 return -1;
2856                         }
2857                 }
2858         }
2859
2860         return 0;
2861 }
2862
2863 static int
2864 worker_loopback_producer_fn(void *arg)
2865 {
2866         struct test *t = arg;
2867         uint8_t port = t->port[0];
2868         uint64_t count = 0;
2869
2870         printf("%d: \tProducer function started\n", __LINE__);
2871         while (count < NUM_PACKETS) {
2872                 struct rte_mbuf *m = NULL;
2873                 do {
2874                         m = rte_pktmbuf_alloc(t->mbuf_pool);
2875                 } while (m == NULL);
2876
2877                 m->udata64 = 0;
2878
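                     /* udata64 counts completed passes through the eventdev; the
                      * flow id is taken from the mbuf address so that packets are
                      * spread across many atomic flows
                      */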
2879                 struct rte_event ev = {
2880                                 .op = RTE_EVENT_OP_NEW,
2881                                 .queue_id = t->qid[0],
2882                                 .flow_id = (uintptr_t)m & 0xFFFF,
2883                                 .mbuf = m,
2884                 };
2885
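                     /* a NEW event is refused once the port's new_event_threshold
                      * of inflight events is reached, so retry until the scheduler
                      * drains some of them
                      */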
2886                 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2887                         while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
2888                                         1)
2889                                 rte_pause();
2890                 }
2891
2892                 count++;
2893         }
2894
2895         return 0;
2896 }
2897
2898 static int
2899 worker_loopback(struct test *t)
2900 {
2901         /* use a single producer core, and a worker core to see what happens
2902          * if the worker loops packets back multiple times
2903          */
2904         struct test_event_dev_stats stats;
2905         uint64_t print_cycles = 0, cycles = 0;
2906         uint64_t tx_pkts = 0;
2907         int err;
2908         int w_lcore, p_lcore;
2909
2910         if (init(t, 8, 2) < 0 ||
2911                         create_atomic_qids(t, 8) < 0) {
2912                 printf("%d: Error initializing device\n", __LINE__);
2913                 return -1;
2914         }
2915
2916         /* RX with low max events */
2917         static struct rte_event_port_conf conf = {
2918                         .dequeue_depth = 32,
2919                         .enqueue_depth = 64,
2920         };
2921         /* note: this cannot go in the static initializer above, since a static
2922          * is initialized only once and this value must be set for every run.
2923          */
2924         conf.new_event_threshold = 512;
2925
2926         if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2927                 printf("Error setting up RX port\n");
2928                 return -1;
2929         }
2930         t->port[0] = 0;
2931         /* TX with higher max events */
2932         conf.new_event_threshold = 4096;
2933         if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2934                 printf("Error setting up TX port\n");
2935                 return -1;
2936         }
2937         t->port[1] = 1;
2938
2939         /* CQ mapping to QID */
2940         err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2941         if (err != 8) { /* should have linked all 8 queues */
2942                 printf("%d: error mapping port 1 to all qids\n", __LINE__);
2943                 return -1;
2944         }
2945
2946         if (rte_event_dev_start(evdev) < 0) {
2947                 printf("%d: Error with start call\n", __LINE__);
2948                 return -1;
2949         }
2950
2951         p_lcore = rte_get_next_lcore(
2952                         /* start core */ -1,
2953                         /* skip master */ 1,
2954                         /* wrap */ 0);
2955         w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
2956
2957         rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
2958         rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
2959
2960         print_cycles = cycles = rte_get_timer_cycles();
2961         while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
2962                         rte_eal_get_lcore_state(w_lcore) != FINISHED) {
2963
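                     /* this (master) lcore drives the sw PMD scheduler while the
                      * producer and worker run on their own lcores
                      */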
2964                 rte_event_schedule(evdev);
2965
2966                 uint64_t new_cycles = rte_get_timer_cycles();
2967
2968                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
2969                         test_event_dev_stats_get(evdev, &stats);
2970                         printf(
2971                                 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
2972                                 __LINE__, stats.rx_pkts, stats.tx_pkts);
2973
2974                         print_cycles = new_cycles;
2975                 }
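                     /* no tx progress for ~3 seconds means the device is stuck;
                      * dump its state and fail the test
                      */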
2976                 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
2977                         test_event_dev_stats_get(evdev, &stats);
2978                         if (stats.tx_pkts == tx_pkts) {
2979                                 rte_event_dev_dump(evdev, stdout);
2980                                 printf("Dumping xstats:\n");
2981                                 xstats_print();
2982                                 printf(
2983                                         "%d: No schedules for 3 seconds, deadlock\n",
2984                                         __LINE__);
2985                                 return -1;
2986                         }
2987                         tx_pkts = stats.tx_pkts;
2988                         cycles = new_cycles;
2989                 }
2990         }
2991         rte_event_schedule(evdev); /* ensure all completions are flushed */
2992
2993         rte_eal_mp_wait_lcore();
2994
2995         cleanup(t);
2996         return 0;
2997 }
2998
2999 static struct rte_mempool *eventdev_func_mempool;
3000
3001 static int
3002 test_sw_eventdev(void)
3003 {
3004         struct test *t = malloc(sizeof(struct test));
3005         int ret;
3006
             if (t == NULL)
                     return -1;

3007         /* manually initialize the op field; older gcc versions complain about
3008          * static initialization of struct members that are bitfields.
3009          */
3010         release_ev.op = RTE_EVENT_OP_RELEASE;
3011
3012         const char *eventdev_name = "event_sw0";
3013         evdev = rte_event_dev_get_dev_id(eventdev_name);
3014         if (evdev < 0) {
3015                 printf("%d: Eventdev %s not found - creating.\n",
3016                                 __LINE__, eventdev_name);
3017                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3018                         printf("Error creating eventdev\n");
3019                         return -1;
3020                 }
3021                 evdev = rte_event_dev_get_dev_id(eventdev_name);
3022                 if (evdev < 0) {
3023                         printf("Error finding newly created eventdev\n");
3024                         return -1;
3025                 }
3026         }
3027
3028         /* Only create mbuf pool once, reuse for each test run */
3029         if (!eventdev_func_mempool) {
3030                 eventdev_func_mempool = rte_pktmbuf_pool_create(
3031                                 "EVENTDEV_SW_SA_MBUF_POOL",
3032                                 (1<<12), /* 4k buffers */
3033                                 32 /*MBUF_CACHE_SIZE*/,
3034                                 0,
3035                                 512, /* use very small mbufs */
3036                                 rte_socket_id());
3037                 if (!eventdev_func_mempool) {
3038                         printf("ERROR creating mempool\n");
3039                         return -1;
3040                 }
3041         }
3042         t->mbuf_pool = eventdev_func_mempool;
3043
3044         printf("*** Running Single Directed Packet test...\n");
3045         ret = test_single_directed_packet(t);
3046         if (ret != 0) {
3047                 printf("ERROR - Single Directed Packet test FAILED.\n");
3048                 return ret;
3049         }
3050         printf("*** Running Single Load Balanced Packet test...\n");
3051         ret = single_packet(t);
3052         if (ret != 0) {
3053                 printf("ERROR - Single Load Balanced Packet test FAILED.\n");
3054                 return ret;
3055         }
3056         printf("*** Running Unordered Basic test...\n");
3057         ret = unordered_basic(t);
3058         if (ret != 0) {
3059                 printf("ERROR - Unordered Basic test FAILED.\n");
3060                 return ret;
3061         }
3062         printf("*** Running Ordered Basic test...\n");
3063         ret = ordered_basic(t);
3064         if (ret != 0) {
3065                 printf("ERROR - Ordered Basic test FAILED.\n");
3066                 return ret;
3067         }
3068         printf("*** Running Burst Packets test...\n");
3069         ret = burst_packets(t);
3070         if (ret != 0) {
3071                 printf("ERROR - Burst Packets test FAILED.\n");
3072                 return ret;
3073         }
3074         printf("*** Running Load Balancing test...\n");
3075         ret = load_balancing(t);
3076         if (ret != 0) {
3077                 printf("ERROR - Load Balancing test FAILED.\n");
3078                 return ret;
3079         }
3080         printf("*** Running Prioritized Directed test...\n");
3081         ret = test_priority_directed(t);
3082         if (ret != 0) {
3083                 printf("ERROR - Prioritized Directed test FAILED.\n");
3084                 return ret;
3085         }
3086         printf("*** Running Prioritized Atomic test...\n");
3087         ret = test_priority_atomic(t);
3088         if (ret != 0) {
3089                 printf("ERROR - Prioritized Atomic test FAILED.\n");
3090                 return ret;
3091         }
3092
3093         printf("*** Running Prioritized Ordered test...\n");
3094         ret = test_priority_ordered(t);
3095         if (ret != 0) {
3096                 printf("ERROR - Prioritized Ordered test FAILED.\n");
3097                 return ret;
3098         }
3099         printf("*** Running Prioritized Unordered test...\n");
3100         ret = test_priority_unordered(t);
3101         if (ret != 0) {
3102                 printf("ERROR - Prioritized Unordered test FAILED.\n");
3103                 return ret;
3104         }
3105         printf("*** Running Invalid QID test...\n");
3106         ret = invalid_qid(t);
3107         if (ret != 0) {
3108                 printf("ERROR - Invalid QID test FAILED.\n");
3109                 return ret;
3110         }
3111         printf("*** Running Load Balancing History test...\n");
3112         ret = load_balancing_history(t);
3113         if (ret != 0) {
3114                 printf("ERROR - Load Balancing History test FAILED.\n");
3115                 return ret;
3116         }
3117         printf("*** Running Inflight Count test...\n");
3118         ret = inflight_counts(t);
3119         if (ret != 0) {
3120                 printf("ERROR - Inflight Count test FAILED.\n");
3121                 return ret;
3122         }
3123         printf("*** Running Abuse Inflights test...\n");
3124         ret = abuse_inflights(t);
3125         if (ret != 0) {
3126                 printf("ERROR - Abuse Inflights test FAILED.\n");
3127                 return ret;
3128         }
3129         printf("*** Running XStats test...\n");
3130         ret = xstats_tests(t);
3131         if (ret != 0) {
3132                 printf("ERROR - XStats test FAILED.\n");
3133                 return ret;
3134         }
3135         printf("*** Running XStats ID Reset test...\n");
3136         ret = xstats_id_reset_tests(t);
3137         if (ret != 0) {
3138                 printf("ERROR - XStats ID Reset test FAILED.\n");
3139                 return ret;
3140         }
3141         printf("*** Running XStats Brute Force test...\n");
3142         ret = xstats_brute_force(t);
3143         if (ret != 0) {
3144                 printf("ERROR - XStats Brute Force test FAILED.\n");
3145                 return ret;
3146         }
3147         printf("*** Running XStats ID Abuse test...\n");
3148         ret = xstats_id_abuse_tests(t);
3149         if (ret != 0) {
3150                 printf("ERROR - XStats ID Abuse test FAILED.\n");
3151                 return ret;
3152         }
3153         printf("*** Running QID Priority test...\n");
3154         ret = qid_priorities(t);
3155         if (ret != 0) {
3156                 printf("ERROR - QID Priority test FAILED.\n");
3157                 return ret;
3158         }
3159         printf("*** Running Ordered Reconfigure test...\n");
3160         ret = ordered_reconfigure(t);
3161         if (ret != 0) {
3162                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3163                 return ret;
3164         }
3165         printf("*** Running Port LB Single Reconfig test...\n");
3166         ret = port_single_lb_reconfig(t);
3167         if (ret != 0) {
3168                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3169                 return ret;
3170         }
3171         printf("*** Running Port Reconfig Credits test...\n");
3172         ret = port_reconfig_credits(t);
3173         if (ret != 0) {
3174                 printf("ERROR - Port Reconfig Credits test FAILED.\n");
3175                 return ret;
3176         }
3177         printf("*** Running Head-of-line-blocking test...\n");
3178         ret = holb(t);
3179         if (ret != 0) {
3180                 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3181                 return ret;
3182         }
3183         if (rte_lcore_count() >= 3) {
3184                 printf("*** Running Worker loopback test...\n");
3185                 ret = worker_loopback(t);
3186                 if (ret != 0) {
3187                         printf("ERROR - Worker loopback test FAILED.\n");
3188                         return ret;
3189                 }
3190         } else {
3191                 printf("### Not enough cores for worker loopback test.\n");
3192                 printf("### Need at least 3 cores for test.\n");
3193         }
3194         /*
3195          * Free the test instance, but leave the mempool initialized; a pointer
3196          * to it is kept in static eventdev_func_mempool for re-use on re-runs.
3197          */
3198         free(t);
3199
3200         return 0;
3201 }
3202
3203 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);