test/eventdev: add SW tests for load balancing
[dpdk.git] / test / test / test_eventdev_sw.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <string.h>
36 #include <stdint.h>
37 #include <errno.h>
38 #include <unistd.h>
39 #include <sys/queue.h>
40
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
44 #include <rte_eal.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
50
51 #include <rte_eventdev.h>
52 #include "test.h"
53
54 #define MAX_PORTS 16
55 #define MAX_QIDS 16
56 #define NUM_PACKETS (1<<18)
57
58 static int evdev;
59
/* Per-test-case state shared by the helpers below. */
struct test {
        struct rte_mempool *mbuf_pool; /* pool test mbufs are allocated from */
        uint8_t port[MAX_PORTS];       /* port ids set up by create_ports() */
        uint8_t qid[MAX_QIDS];         /* queue ids set up by create_*_qids() */
        int nb_qids;                   /* number of entries used in qid[] */
};
66
67 static struct rte_event release_ev;
68
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
71 {
72         /*
73          * len = 14 + 46
74          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
75          */
76         static const uint8_t arp_request[] = {
77                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84                 0x00, 0x00, 0x00, 0x00
85         };
86         struct rte_mbuf *m;
87         int pkt_len = sizeof(arp_request) - 1;
88
89         m = rte_pktmbuf_alloc(mp);
90         if (!m)
91                 return 0;
92
93         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94                 arp_request, pkt_len);
95         rte_pktmbuf_pkt_len(m) = pkt_len;
96         rte_pktmbuf_data_len(m) = pkt_len;
97
98         RTE_SET_USED(portid);
99
100         return m;
101 }
102
103 /* initialization and config */
104 static inline int
105 init(struct test *t, int nb_queues, int nb_ports)
106 {
107         struct rte_event_dev_config config = {
108                         .nb_event_queues = nb_queues,
109                         .nb_event_ports = nb_ports,
110                         .nb_event_queue_flows = 1024,
111                         .nb_events_limit = 4096,
112                         .nb_event_port_dequeue_depth = 128,
113                         .nb_event_port_enqueue_depth = 128,
114         };
115         int ret;
116
117         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
118
119         memset(t, 0, sizeof(*t));
120         t->mbuf_pool = temp;
121
122         ret = rte_event_dev_configure(evdev, &config);
123         if (ret < 0)
124                 printf("%d: Error configuring device\n", __LINE__);
125         return ret;
126 };
127
128 static inline int
129 create_ports(struct test *t, int num_ports)
130 {
131         int i;
132         static const struct rte_event_port_conf conf = {
133                         .new_event_threshold = 1024,
134                         .dequeue_depth = 32,
135                         .enqueue_depth = 64,
136         };
137         if (num_ports > MAX_PORTS)
138                 return -1;
139
140         for (i = 0; i < num_ports; i++) {
141                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
142                         printf("Error setting up port %d\n", i);
143                         return -1;
144                 }
145                 t->port[i] = i;
146         }
147
148         return 0;
149 }
150
151 static inline int
152 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
153 {
154         int i;
155
156         /* Q creation */
157         const struct rte_event_queue_conf conf = {
158                         .event_queue_cfg = flags,
159                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
160                         .nb_atomic_flows = 1024,
161                         .nb_atomic_order_sequences = 1024,
162         };
163
164         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
165                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
166                         printf("%d: error creating qid %d\n", __LINE__, i);
167                         return -1;
168                 }
169                 t->qid[i] = i;
170         }
171         t->nb_qids += num_qids;
172         if (t->nb_qids > MAX_QIDS)
173                 return -1;
174
175         return 0;
176 }
177
/* Create @num_qids atomic-scheduled load-balanced queues. */
static inline int
create_atomic_qids(struct test *t, int num_qids)
{
        return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
}
183
/* Create @num_qids ordered-scheduled load-balanced queues. */
static inline int
create_ordered_qids(struct test *t, int num_qids)
{
        return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
}
189
190
/* Create @num_qids parallel (unordered) load-balanced queues. */
static inline int
create_unordered_qids(struct test *t, int num_qids)
{
        return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
}
196
197 static inline int
198 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
199 {
200         int i;
201
202         /* Q creation */
203         static const struct rte_event_queue_conf conf = {
204                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
205                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
206                         .nb_atomic_flows = 1024,
207                         .nb_atomic_order_sequences = 1024,
208         };
209
210         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
211                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
212                         printf("%d: error creating qid %d\n", __LINE__, i);
213                         return -1;
214                 }
215                 t->qid[i] = i;
216
217                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
218                                 &t->qid[i], NULL, 1) != 1) {
219                         printf("%d: error creating link for qid %d\n",
220                                         __LINE__, i);
221                         return -1;
222                 }
223         }
224         t->nb_qids += num_qids;
225         if (t->nb_qids > MAX_QIDS)
226                 return -1;
227
228         return 0;
229 }
230
231 /* destruction */
232 static inline int
233 cleanup(struct test *t __rte_unused)
234 {
235         rte_event_dev_stop(evdev);
236         rte_event_dev_close(evdev);
237         return 0;
238 };
239
/* Snapshot of the SW eventdev xstats counters, filled in by
 * test_event_dev_stats_get().
 */
struct test_event_dev_stats {
        uint64_t rx_pkts;       /**< Total packets received */
        uint64_t rx_dropped;    /**< Total packets dropped (Eg Invalid QID) */
        uint64_t tx_pkts;       /**< Total packets transmitted */

        /** Packets received on this port */
        uint64_t port_rx_pkts[MAX_PORTS];
        /** Packets dropped on this port */
        uint64_t port_rx_dropped[MAX_PORTS];
        /** Packets inflight on this port */
        uint64_t port_inflight[MAX_PORTS];
        /** Packets transmitted on this port */
        uint64_t port_tx_pkts[MAX_PORTS];
        /** Packets received on this qid */
        uint64_t qid_rx_pkts[MAX_QIDS];
        /** Packets dropped on this qid */
        uint64_t qid_rx_dropped[MAX_QIDS];
        /** Packets transmitted on this qid */
        uint64_t qid_tx_pkts[MAX_QIDS];
};
260
261 static inline int
262 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
263 {
264         static uint32_t i;
265         static uint32_t total_ids[3]; /* rx, tx and drop */
266         static uint32_t port_rx_pkts_ids[MAX_PORTS];
267         static uint32_t port_rx_dropped_ids[MAX_PORTS];
268         static uint32_t port_inflight_ids[MAX_PORTS];
269         static uint32_t port_tx_pkts_ids[MAX_PORTS];
270         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
271         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
272         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
273
274
275         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
276                         "dev_rx", &total_ids[0]);
277         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
278                         "dev_drop", &total_ids[1]);
279         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
280                         "dev_tx", &total_ids[2]);
281         for (i = 0; i < MAX_PORTS; i++) {
282                 char name[32];
283                 snprintf(name, sizeof(name), "port_%u_rx", i);
284                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
285                                 dev_id, name, &port_rx_pkts_ids[i]);
286                 snprintf(name, sizeof(name), "port_%u_drop", i);
287                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
288                                 dev_id, name, &port_rx_dropped_ids[i]);
289                 snprintf(name, sizeof(name), "port_%u_inflight", i);
290                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
291                                 dev_id, name, &port_inflight_ids[i]);
292                 snprintf(name, sizeof(name), "port_%u_tx", i);
293                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
294                                 dev_id, name, &port_tx_pkts_ids[i]);
295         }
296         for (i = 0; i < MAX_QIDS; i++) {
297                 char name[32];
298                 snprintf(name, sizeof(name), "qid_%u_rx", i);
299                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
300                                 dev_id, name, &qid_rx_pkts_ids[i]);
301                 snprintf(name, sizeof(name), "qid_%u_drop", i);
302                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
303                                 dev_id, name, &qid_rx_dropped_ids[i]);
304                 snprintf(name, sizeof(name), "qid_%u_tx", i);
305                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
306                                 dev_id, name, &qid_tx_pkts_ids[i]);
307         }
308
309         return 0;
310 }
311
312 /* run_prio_packet_test
313  * This performs a basic packet priority check on the test instance passed in.
314  * It is factored out of the main priority tests as the same tests must be
315  * performed to ensure prioritization of each type of QID.
316  *
317  * Requirements:
318  *  - An initialized test structure, including mempool
319  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
320  *  - t->qid[0] is the QID to be tested
321  *  - if LB QID, the CQ must be mapped to the QID.
322  */
323 static int
324 run_prio_packet_test(struct test *t)
325 {
326         int err;
327         const uint32_t MAGIC_SEQN[] = {4711, 1234};
328         const uint32_t PRIORITY[] = {
329                 RTE_EVENT_DEV_PRIORITY_NORMAL,
330                 RTE_EVENT_DEV_PRIORITY_HIGHEST
331         };
332         unsigned int i;
333         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
334                 /* generate pkt and enqueue */
335                 struct rte_event ev;
336                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
337                 if (!arp) {
338                         printf("%d: gen of pkt failed\n", __LINE__);
339                         return -1;
340                 }
341                 arp->seqn = MAGIC_SEQN[i];
342
343                 ev = (struct rte_event){
344                         .priority = PRIORITY[i],
345                         .op = RTE_EVENT_OP_NEW,
346                         .queue_id = t->qid[0],
347                         .mbuf = arp
348                 };
349                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
350                 if (err < 0) {
351                         printf("%d: error failed to enqueue\n", __LINE__);
352                         return -1;
353                 }
354         }
355
356         rte_event_schedule(evdev);
357
358         struct test_event_dev_stats stats;
359         err = test_event_dev_stats_get(evdev, &stats);
360         if (err) {
361                 printf("%d: error failed to get stats\n", __LINE__);
362                 return -1;
363         }
364
365         if (stats.port_rx_pkts[t->port[0]] != 2) {
366                 printf("%d: error stats incorrect for directed port\n",
367                                 __LINE__);
368                 rte_event_dev_dump(evdev, stdout);
369                 return -1;
370         }
371
372         struct rte_event ev, ev2;
373         uint32_t deq_pkts;
374         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
375         if (deq_pkts != 1) {
376                 printf("%d: error failed to deq\n", __LINE__);
377                 rte_event_dev_dump(evdev, stdout);
378                 return -1;
379         }
380         if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
381                 printf("%d: first packet out not highest priority\n",
382                                 __LINE__);
383                 rte_event_dev_dump(evdev, stdout);
384                 return -1;
385         }
386         rte_pktmbuf_free(ev.mbuf);
387
388         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
389         if (deq_pkts != 1) {
390                 printf("%d: error failed to deq\n", __LINE__);
391                 rte_event_dev_dump(evdev, stdout);
392                 return -1;
393         }
394         if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
395                 printf("%d: second packet out not lower priority\n",
396                                 __LINE__);
397                 rte_event_dev_dump(evdev, stdout);
398                 return -1;
399         }
400         rte_pktmbuf_free(ev2.mbuf);
401
402         cleanup(t);
403         return 0;
404 }
405
406 static int
407 test_single_directed_packet(struct test *t)
408 {
409         const int rx_enq = 0;
410         const int wrk_enq = 2;
411         int err;
412
413         /* Create instance with 3 directed QIDs going to 3 ports */
414         if (init(t, 3, 3) < 0 ||
415                         create_ports(t, 3) < 0 ||
416                         create_directed_qids(t, 3, t->port) < 0)
417                 return -1;
418
419         if (rte_event_dev_start(evdev) < 0) {
420                 printf("%d: Error with start call\n", __LINE__);
421                 return -1;
422         }
423
424         /************** FORWARD ****************/
425         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
426         struct rte_event ev = {
427                         .op = RTE_EVENT_OP_NEW,
428                         .queue_id = wrk_enq,
429                         .mbuf = arp,
430         };
431
432         if (!arp) {
433                 printf("%d: gen of pkt failed\n", __LINE__);
434                 return -1;
435         }
436
437         const uint32_t MAGIC_SEQN = 4711;
438         arp->seqn = MAGIC_SEQN;
439
440         /* generate pkt and enqueue */
441         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
442         if (err < 0) {
443                 printf("%d: error failed to enqueue\n", __LINE__);
444                 return -1;
445         }
446
447         /* Run schedule() as dir packets may need to be re-ordered */
448         rte_event_schedule(evdev);
449
450         struct test_event_dev_stats stats;
451         err = test_event_dev_stats_get(evdev, &stats);
452         if (err) {
453                 printf("%d: error failed to get stats\n", __LINE__);
454                 return -1;
455         }
456
457         if (stats.port_rx_pkts[rx_enq] != 1) {
458                 printf("%d: error stats incorrect for directed port\n",
459                                 __LINE__);
460                 return -1;
461         }
462
463         uint32_t deq_pkts;
464         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
465         if (deq_pkts != 1) {
466                 printf("%d: error failed to deq\n", __LINE__);
467                 return -1;
468         }
469
470         err = test_event_dev_stats_get(evdev, &stats);
471         if (stats.port_rx_pkts[wrk_enq] != 0 &&
472                         stats.port_rx_pkts[wrk_enq] != 1) {
473                 printf("%d: error directed stats post-dequeue\n", __LINE__);
474                 return -1;
475         }
476
477         if (ev.mbuf->seqn != MAGIC_SEQN) {
478                 printf("%d: error magic sequence number not dequeued\n",
479                                 __LINE__);
480                 return -1;
481         }
482
483         rte_pktmbuf_free(ev.mbuf);
484         cleanup(t);
485         return 0;
486 }
487
488
489 static int
490 test_priority_directed(struct test *t)
491 {
492         if (init(t, 1, 1) < 0 ||
493                         create_ports(t, 1) < 0 ||
494                         create_directed_qids(t, 1, t->port) < 0) {
495                 printf("%d: Error initializing device\n", __LINE__);
496                 return -1;
497         }
498
499         if (rte_event_dev_start(evdev) < 0) {
500                 printf("%d: Error with start call\n", __LINE__);
501                 return -1;
502         }
503
504         return run_prio_packet_test(t);
505 }
506
507 static int
508 test_priority_atomic(struct test *t)
509 {
510         if (init(t, 1, 1) < 0 ||
511                         create_ports(t, 1) < 0 ||
512                         create_atomic_qids(t, 1) < 0) {
513                 printf("%d: Error initializing device\n", __LINE__);
514                 return -1;
515         }
516
517         /* map the QID */
518         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
519                 printf("%d: error mapping qid to port\n", __LINE__);
520                 return -1;
521         }
522         if (rte_event_dev_start(evdev) < 0) {
523                 printf("%d: Error with start call\n", __LINE__);
524                 return -1;
525         }
526
527         return run_prio_packet_test(t);
528 }
529
530 static int
531 test_priority_ordered(struct test *t)
532 {
533         if (init(t, 1, 1) < 0 ||
534                         create_ports(t, 1) < 0 ||
535                         create_ordered_qids(t, 1) < 0) {
536                 printf("%d: Error initializing device\n", __LINE__);
537                 return -1;
538         }
539
540         /* map the QID */
541         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
542                 printf("%d: error mapping qid to port\n", __LINE__);
543                 return -1;
544         }
545         if (rte_event_dev_start(evdev) < 0) {
546                 printf("%d: Error with start call\n", __LINE__);
547                 return -1;
548         }
549
550         return run_prio_packet_test(t);
551 }
552
553 static int
554 test_priority_unordered(struct test *t)
555 {
556         if (init(t, 1, 1) < 0 ||
557                         create_ports(t, 1) < 0 ||
558                         create_unordered_qids(t, 1) < 0) {
559                 printf("%d: Error initializing device\n", __LINE__);
560                 return -1;
561         }
562
563         /* map the QID */
564         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
565                 printf("%d: error mapping qid to port\n", __LINE__);
566                 return -1;
567         }
568         if (rte_event_dev_start(evdev) < 0) {
569                 printf("%d: Error with start call\n", __LINE__);
570                 return -1;
571         }
572
573         return run_prio_packet_test(t);
574 }
575
576 static int
577 burst_packets(struct test *t)
578 {
579         /************** CONFIG ****************/
580         uint32_t i;
581         int err;
582         int ret;
583
584         /* Create instance with 2 ports and 2 queues */
585         if (init(t, 2, 2) < 0 ||
586                         create_ports(t, 2) < 0 ||
587                         create_atomic_qids(t, 2) < 0) {
588                 printf("%d: Error initializing device\n", __LINE__);
589                 return -1;
590         }
591
592         /* CQ mapping to QID */
593         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
594         if (ret != 1) {
595                 printf("%d: error mapping lb qid0\n", __LINE__);
596                 return -1;
597         }
598         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
599         if (ret != 1) {
600                 printf("%d: error mapping lb qid1\n", __LINE__);
601                 return -1;
602         }
603
604         if (rte_event_dev_start(evdev) < 0) {
605                 printf("%d: Error with start call\n", __LINE__);
606                 return -1;
607         }
608
609         /************** FORWARD ****************/
610         const uint32_t rx_port = 0;
611         const uint32_t NUM_PKTS = 2;
612
613         for (i = 0; i < NUM_PKTS; i++) {
614                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
615                 if (!arp) {
616                         printf("%d: error generating pkt\n", __LINE__);
617                         return -1;
618                 }
619
620                 struct rte_event ev = {
621                                 .op = RTE_EVENT_OP_NEW,
622                                 .queue_id = i % 2,
623                                 .flow_id = i % 3,
624                                 .mbuf = arp,
625                 };
626                 /* generate pkt and enqueue */
627                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
628                 if (err < 0) {
629                         printf("%d: Failed to enqueue\n", __LINE__);
630                         return -1;
631                 }
632         }
633         rte_event_schedule(evdev);
634
635         /* Check stats for all NUM_PKTS arrived to sched core */
636         struct test_event_dev_stats stats;
637
638         err = test_event_dev_stats_get(evdev, &stats);
639         if (err) {
640                 printf("%d: failed to get stats\n", __LINE__);
641                 return -1;
642         }
643         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
644                 printf("%d: Sched core didn't receive all %d pkts\n",
645                                 __LINE__, NUM_PKTS);
646                 rte_event_dev_dump(evdev, stdout);
647                 return -1;
648         }
649
650         uint32_t deq_pkts;
651         int p;
652
653         deq_pkts = 0;
654         /******** DEQ QID 1 *******/
655         do {
656                 struct rte_event ev;
657                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
658                 deq_pkts += p;
659                 rte_pktmbuf_free(ev.mbuf);
660         } while (p);
661
662         if (deq_pkts != NUM_PKTS/2) {
663                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
664                                 __LINE__);
665                 return -1;
666         }
667
668         /******** DEQ QID 2 *******/
669         deq_pkts = 0;
670         do {
671                 struct rte_event ev;
672                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
673                 deq_pkts += p;
674                 rte_pktmbuf_free(ev.mbuf);
675         } while (p);
676         if (deq_pkts != NUM_PKTS/2) {
677                 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
678                                 __LINE__);
679                 return -1;
680         }
681
682         cleanup(t);
683         return 0;
684 }
685
686 static int
687 abuse_inflights(struct test *t)
688 {
689         const int rx_enq = 0;
690         const int wrk_enq = 2;
691         int err;
692
693         /* Create instance with 4 ports */
694         if (init(t, 1, 4) < 0 ||
695                         create_ports(t, 4) < 0 ||
696                         create_atomic_qids(t, 1) < 0) {
697                 printf("%d: Error initializing device\n", __LINE__);
698                 return -1;
699         }
700
701         /* CQ mapping to QID */
702         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
703         if (err != 1) {
704                 printf("%d: error mapping lb qid\n", __LINE__);
705                 cleanup(t);
706                 return -1;
707         }
708
709         if (rte_event_dev_start(evdev) < 0) {
710                 printf("%d: Error with start call\n", __LINE__);
711                 return -1;
712         }
713
714         /* Enqueue op only */
715         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
716         if (err < 0) {
717                 printf("%d: Failed to enqueue\n", __LINE__);
718                 return -1;
719         }
720
721         /* schedule */
722         rte_event_schedule(evdev);
723
724         struct test_event_dev_stats stats;
725
726         err = test_event_dev_stats_get(evdev, &stats);
727         if (err) {
728                 printf("%d: failed to get stats\n", __LINE__);
729                 return -1;
730         }
731
732         if (stats.rx_pkts != 0 ||
733                         stats.tx_pkts != 0 ||
734                         stats.port_inflight[wrk_enq] != 0) {
735                 printf("%d: Sched core didn't handle pkt as expected\n",
736                                 __LINE__);
737                 return -1;
738         }
739
740         cleanup(t);
741         return 0;
742 }
743
744 static int
745 port_reconfig_credits(struct test *t)
746 {
747         if (init(t, 1, 1) < 0) {
748                 printf("%d: Error initializing device\n", __LINE__);
749                 return -1;
750         }
751
752         uint32_t i;
753         const uint32_t NUM_ITERS = 32;
754         for (i = 0; i < NUM_ITERS; i++) {
755                 const struct rte_event_queue_conf conf = {
756                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
757                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
758                         .nb_atomic_flows = 1024,
759                         .nb_atomic_order_sequences = 1024,
760                 };
761                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
762                         printf("%d: error creating qid\n", __LINE__);
763                         return -1;
764                 }
765                 t->qid[0] = 0;
766
767                 static const struct rte_event_port_conf port_conf = {
768                                 .new_event_threshold = 128,
769                                 .dequeue_depth = 32,
770                                 .enqueue_depth = 64,
771                 };
772                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
773                         printf("%d Error setting up port\n", __LINE__);
774                         return -1;
775                 }
776
777                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
778                 if (links != 1) {
779                         printf("%d: error mapping lb qid\n", __LINE__);
780                         goto fail;
781                 }
782
783                 if (rte_event_dev_start(evdev) < 0) {
784                         printf("%d: Error with start call\n", __LINE__);
785                         goto fail;
786                 }
787
788                 const uint32_t NPKTS = 1;
789                 uint32_t j;
790                 for (j = 0; j < NPKTS; j++) {
791                         struct rte_event ev;
792                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
793                         if (!arp) {
794                                 printf("%d: gen of pkt failed\n", __LINE__);
795                                 goto fail;
796                         }
797                         ev.queue_id = t->qid[0];
798                         ev.op = RTE_EVENT_OP_NEW;
799                         ev.mbuf = arp;
800                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
801                         if (err != 1) {
802                                 printf("%d: Failed to enqueue\n", __LINE__);
803                                 rte_event_dev_dump(0, stdout);
804                                 goto fail;
805                         }
806                 }
807
808                 rte_event_schedule(evdev);
809
810                 struct rte_event ev[NPKTS];
811                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
812                                                         NPKTS, 0);
813                 if (deq != 1)
814                         printf("%d error; no packet dequeued\n", __LINE__);
815
816                 /* let cleanup below stop the device on last iter */
817                 if (i != NUM_ITERS-1)
818                         rte_event_dev_stop(evdev);
819         }
820
821         cleanup(t);
822         return 0;
823 fail:
824         cleanup(t);
825         return -1;
826 }
827
828 static int
829 port_single_lb_reconfig(struct test *t)
830 {
831         if (init(t, 2, 2) < 0) {
832                 printf("%d: Error initializing device\n", __LINE__);
833                 goto fail;
834         }
835
836         static const struct rte_event_queue_conf conf_lb_atomic = {
837                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
838                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
839                 .nb_atomic_flows = 1024,
840                 .nb_atomic_order_sequences = 1024,
841         };
842         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
843                 printf("%d: error creating qid\n", __LINE__);
844                 goto fail;
845         }
846
847         static const struct rte_event_queue_conf conf_single_link = {
848                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
849                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
850                 .nb_atomic_flows = 1024,
851                 .nb_atomic_order_sequences = 1024,
852         };
853         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
854                 printf("%d: error creating qid\n", __LINE__);
855                 goto fail;
856         }
857
858         struct rte_event_port_conf port_conf = {
859                 .new_event_threshold = 128,
860                 .dequeue_depth = 32,
861                 .enqueue_depth = 64,
862         };
863         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
864                 printf("%d Error setting up port\n", __LINE__);
865                 goto fail;
866         }
867         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
868                 printf("%d Error setting up port\n", __LINE__);
869                 goto fail;
870         }
871
872         /* link port to lb queue */
873         uint8_t queue_id = 0;
874         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
875                 printf("%d: error creating link for qid\n", __LINE__);
876                 goto fail;
877         }
878
879         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
880         if (ret != 1) {
881                 printf("%d: Error unlinking lb port\n", __LINE__);
882                 goto fail;
883         }
884
885         queue_id = 1;
886         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
887                 printf("%d: error creating link for qid\n", __LINE__);
888                 goto fail;
889         }
890
891         queue_id = 0;
892         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
893         if (err != 1) {
894                 printf("%d: error mapping lb qid\n", __LINE__);
895                 goto fail;
896         }
897
898         if (rte_event_dev_start(evdev) < 0) {
899                 printf("%d: Error with start call\n", __LINE__);
900                 goto fail;
901         }
902
903         cleanup(t);
904         return 0;
905 fail:
906         cleanup(t);
907         return -1;
908 }
909
910 static int
911 ordered_reconfigure(struct test *t)
912 {
913         if (init(t, 1, 1) < 0 ||
914                         create_ports(t, 1) < 0) {
915                 printf("%d: Error initializing device\n", __LINE__);
916                 return -1;
917         }
918
919         const struct rte_event_queue_conf conf = {
920                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
921                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
922                         .nb_atomic_flows = 1024,
923                         .nb_atomic_order_sequences = 1024,
924         };
925
926         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
927                 printf("%d: error creating qid\n", __LINE__);
928                 goto failed;
929         }
930
931         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
932                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
933                 goto failed;
934         }
935
936         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
937         if (rte_event_dev_start(evdev) < 0) {
938                 printf("%d: Error with start call\n", __LINE__);
939                 return -1;
940         }
941
942         cleanup(t);
943         return 0;
944 failed:
945         cleanup(t);
946         return -1;
947 }
948
949 static int
950 qid_priorities(struct test *t)
951 {
952         /* Test works by having a CQ with enough empty space for all packets,
953          * and enqueueing 3 packets to 3 QIDs. They must return based on the
954          * priority of the QID, not the ingress order, to pass the test
955          */
956         unsigned int i;
957         /* Create instance with 1 ports, and 3 qids */
958         if (init(t, 3, 1) < 0 ||
959                         create_ports(t, 1) < 0) {
960                 printf("%d: Error initializing device\n", __LINE__);
961                 return -1;
962         }
963
964         for (i = 0; i < 3; i++) {
965                 /* Create QID */
966                 const struct rte_event_queue_conf conf = {
967                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
968                         /* increase priority (0 == highest), as we go */
969                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
970                         .nb_atomic_flows = 1024,
971                         .nb_atomic_order_sequences = 1024,
972                 };
973
974                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
975                         printf("%d: error creating qid %d\n", __LINE__, i);
976                         return -1;
977                 }
978                 t->qid[i] = i;
979         }
980         t->nb_qids = i;
981         /* map all QIDs to port */
982         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
983
984         if (rte_event_dev_start(evdev) < 0) {
985                 printf("%d: Error with start call\n", __LINE__);
986                 return -1;
987         }
988
989         /* enqueue 3 packets, setting seqn and QID to check priority */
990         for (i = 0; i < 3; i++) {
991                 struct rte_event ev;
992                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
993                 if (!arp) {
994                         printf("%d: gen of pkt failed\n", __LINE__);
995                         return -1;
996                 }
997                 ev.queue_id = t->qid[i];
998                 ev.op = RTE_EVENT_OP_NEW;
999                 ev.mbuf = arp;
1000                 arp->seqn = i;
1001
1002                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1003                 if (err != 1) {
1004                         printf("%d: Failed to enqueue\n", __LINE__);
1005                         return -1;
1006                 }
1007         }
1008
1009         rte_event_schedule(evdev);
1010
1011         /* dequeue packets, verify priority was upheld */
1012         struct rte_event ev[32];
1013         uint32_t deq_pkts =
1014                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1015         if (deq_pkts != 3) {
1016                 printf("%d: failed to deq packets\n", __LINE__);
1017                 rte_event_dev_dump(evdev, stdout);
1018                 return -1;
1019         }
1020         for (i = 0; i < 3; i++) {
1021                 if (ev[i].mbuf->seqn != 2-i) {
1022                         printf(
1023                                 "%d: qid priority test: seqn %d incorrectly prioritized\n",
1024                                         __LINE__, i);
1025                 }
1026         }
1027
1028         cleanup(t);
1029         return 0;
1030 }
1031
1032 static int
1033 load_balancing(struct test *t)
1034 {
1035         const int rx_enq = 0;
1036         int err;
1037         uint32_t i;
1038
1039         if (init(t, 1, 4) < 0 ||
1040                         create_ports(t, 4) < 0 ||
1041                         create_atomic_qids(t, 1) < 0) {
1042                 printf("%d: Error initializing device\n", __LINE__);
1043                 return -1;
1044         }
1045
1046         for (i = 0; i < 3; i++) {
1047                 /* map port 1 - 3 inclusive */
1048                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1049                                 NULL, 1) != 1) {
1050                         printf("%d: error mapping qid to port %d\n",
1051                                         __LINE__, i);
1052                         return -1;
1053                 }
1054         }
1055
1056         if (rte_event_dev_start(evdev) < 0) {
1057                 printf("%d: Error with start call\n", __LINE__);
1058                 return -1;
1059         }
1060
1061         /************** FORWARD ****************/
1062         /*
1063          * Create a set of flows that test the load-balancing operation of the
1064          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1065          * with a new flow, which should be sent to the 3rd mapped CQ
1066          */
1067         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1068
1069         for (i = 0; i < RTE_DIM(flows); i++) {
1070                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1071                 if (!arp) {
1072                         printf("%d: gen of pkt failed\n", __LINE__);
1073                         return -1;
1074                 }
1075
1076                 struct rte_event ev = {
1077                                 .op = RTE_EVENT_OP_NEW,
1078                                 .queue_id = t->qid[0],
1079                                 .flow_id = flows[i],
1080                                 .mbuf = arp,
1081                 };
1082                 /* generate pkt and enqueue */
1083                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1084                 if (err < 0) {
1085                         printf("%d: Failed to enqueue\n", __LINE__);
1086                         return -1;
1087                 }
1088         }
1089
1090         rte_event_schedule(evdev);
1091
1092         struct test_event_dev_stats stats;
1093         err = test_event_dev_stats_get(evdev, &stats);
1094         if (err) {
1095                 printf("%d: failed to get stats\n", __LINE__);
1096                 return -1;
1097         }
1098
1099         if (stats.port_inflight[1] != 4) {
1100                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1101                                 __func__);
1102                 return -1;
1103         }
1104         if (stats.port_inflight[2] != 2) {
1105                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1106                                 __func__);
1107                 return -1;
1108         }
1109         if (stats.port_inflight[3] != 3) {
1110                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
1111                                 __func__);
1112                 return -1;
1113         }
1114
1115         cleanup(t);
1116         return 0;
1117 }
1118
1119 static int
1120 load_balancing_history(struct test *t)
1121 {
1122         struct test_event_dev_stats stats = {0};
1123         const int rx_enq = 0;
1124         int err;
1125         uint32_t i;
1126
1127         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
1128         if (init(t, 1, 4) < 0 ||
1129                         create_ports(t, 4) < 0 ||
1130                         create_atomic_qids(t, 1) < 0)
1131                 return -1;
1132
1133         /* CQ mapping to QID */
1134         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
1135                 printf("%d: error mapping port 1 qid\n", __LINE__);
1136                 return -1;
1137         }
1138         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
1139                 printf("%d: error mapping port 2 qid\n", __LINE__);
1140                 return -1;
1141         }
1142         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
1143                 printf("%d: error mapping port 3 qid\n", __LINE__);
1144                 return -1;
1145         }
1146         if (rte_event_dev_start(evdev) < 0) {
1147                 printf("%d: Error with start call\n", __LINE__);
1148                 return -1;
1149         }
1150
1151         /*
1152          * Create a set of flows that test the load-balancing operation of the
1153          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
1154          * the packet from CQ 0, send in a new set of flows. Ensure that:
1155          *  1. The new flow 3 gets into the empty CQ0
1156          *  2. packets for existing flow gets added into CQ1
1157          *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
1158          *     more outstanding pkts
1159          *
1160          *  This test makes sure that when a flow ends (i.e. all packets
1161          *  have been completed for that flow), that the flow can be moved
1162          *  to a different CQ when new packets come in for that flow.
1163          */
1164         static uint32_t flows1[] = {0, 1, 1, 2};
1165
1166         for (i = 0; i < RTE_DIM(flows1); i++) {
1167                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1168                 struct rte_event ev = {
1169                                 .flow_id = flows1[i],
1170                                 .op = RTE_EVENT_OP_NEW,
1171                                 .queue_id = t->qid[0],
1172                                 .event_type = RTE_EVENT_TYPE_CPU,
1173                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1174                                 .mbuf = arp
1175                 };
1176
1177                 if (!arp) {
1178                         printf("%d: gen of pkt failed\n", __LINE__);
1179                         return -1;
1180                 }
1181                 arp->hash.rss = flows1[i];
1182                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1183                 if (err < 0) {
1184                         printf("%d: Failed to enqueue\n", __LINE__);
1185                         return -1;
1186                 }
1187         }
1188
1189         /* call the scheduler */
1190         rte_event_schedule(evdev);
1191
1192         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
1193         struct rte_event ev;
1194         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
1195                 printf("%d: failed to dequeue\n", __LINE__);
1196                 return -1;
1197         }
1198         if (ev.mbuf->hash.rss != flows1[0]) {
1199                 printf("%d: unexpected flow received\n", __LINE__);
1200                 return -1;
1201         }
1202
1203         /* drop the flow 0 packet from port 1 */
1204         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
1205
1206         /* call the scheduler */
1207         rte_event_schedule(evdev);
1208
1209         /*
1210          * Set up the next set of flows, first a new flow to fill up
1211          * CQ 0, so that the next flow 0 packet should go to CQ2
1212          */
1213         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
1214
1215         for (i = 0; i < RTE_DIM(flows2); i++) {
1216                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1217                 struct rte_event ev = {
1218                                 .flow_id = flows2[i],
1219                                 .op = RTE_EVENT_OP_NEW,
1220                                 .queue_id = t->qid[0],
1221                                 .event_type = RTE_EVENT_TYPE_CPU,
1222                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1223                                 .mbuf = arp
1224                 };
1225
1226                 if (!arp) {
1227                         printf("%d: gen of pkt failed\n", __LINE__);
1228                         return -1;
1229                 }
1230                 arp->hash.rss = flows2[i];
1231
1232                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1233                 if (err < 0) {
1234                         printf("%d: Failed to enqueue\n", __LINE__);
1235                         return -1;
1236                 }
1237         }
1238
1239         /* schedule */
1240         rte_event_schedule(evdev);
1241
1242         err = test_event_dev_stats_get(evdev, &stats);
1243         if (err) {
1244                 printf("%d:failed to get stats\n", __LINE__);
1245                 return -1;
1246         }
1247
1248         /*
1249          * Now check the resulting inflights on each port.
1250          */
1251         if (stats.port_inflight[1] != 3) {
1252                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1253                                 __func__);
1254                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
1255                                 (unsigned int)stats.port_inflight[1],
1256                                 (unsigned int)stats.port_inflight[2],
1257                                 (unsigned int)stats.port_inflight[3]);
1258                 return -1;
1259         }
1260         if (stats.port_inflight[2] != 4) {
1261                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1262                                 __func__);
1263                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
1264                                 (unsigned int)stats.port_inflight[1],
1265                                 (unsigned int)stats.port_inflight[2],
1266                                 (unsigned int)stats.port_inflight[3]);
1267                 return -1;
1268         }
1269         if (stats.port_inflight[3] != 2) {
1270                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
1271                                 __func__);
1272                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
1273                                 (unsigned int)stats.port_inflight[1],
1274                                 (unsigned int)stats.port_inflight[2],
1275                                 (unsigned int)stats.port_inflight[3]);
1276                 return -1;
1277         }
1278
1279         for (i = 1; i <= 3; i++) {
1280                 struct rte_event ev;
1281                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
1282                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
1283         }
1284         rte_event_schedule(evdev);
1285
1286         cleanup(t);
1287         return 0;
1288 }
1289
1290 static int
1291 invalid_qid(struct test *t)
1292 {
1293         struct test_event_dev_stats stats;
1294         const int rx_enq = 0;
1295         int err;
1296         uint32_t i;
1297
1298         if (init(t, 1, 4) < 0 ||
1299                         create_ports(t, 4) < 0 ||
1300                         create_atomic_qids(t, 1) < 0) {
1301                 printf("%d: Error initializing device\n", __LINE__);
1302                 return -1;
1303         }
1304
1305         /* CQ mapping to QID */
1306         for (i = 0; i < 4; i++) {
1307                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
1308                                 NULL, 1);
1309                 if (err != 1) {
1310                         printf("%d: error mapping port 1 qid\n", __LINE__);
1311                         return -1;
1312                 }
1313         }
1314
1315         if (rte_event_dev_start(evdev) < 0) {
1316                 printf("%d: Error with start call\n", __LINE__);
1317                 return -1;
1318         }
1319
1320         /*
1321          * Send in a packet with an invalid qid to the scheduler.
1322          * We should see the packed enqueued OK, but the inflights for
1323          * that packet should not be incremented, and the rx_dropped
1324          * should be incremented.
1325          */
1326         static uint32_t flows1[] = {20};
1327
1328         for (i = 0; i < RTE_DIM(flows1); i++) {
1329                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1330                 if (!arp) {
1331                         printf("%d: gen of pkt failed\n", __LINE__);
1332                         return -1;
1333                 }
1334
1335                 struct rte_event ev = {
1336                                 .op = RTE_EVENT_OP_NEW,
1337                                 .queue_id = t->qid[0] + flows1[i],
1338                                 .flow_id = i,
1339                                 .mbuf = arp,
1340                 };
1341                 /* generate pkt and enqueue */
1342                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1343                 if (err < 0) {
1344                         printf("%d: Failed to enqueue\n", __LINE__);
1345                         return -1;
1346                 }
1347         }
1348
1349         /* call the scheduler */
1350         rte_event_schedule(evdev);
1351
1352         err = test_event_dev_stats_get(evdev, &stats);
1353         if (err) {
1354                 printf("%d: failed to get stats\n", __LINE__);
1355                 return -1;
1356         }
1357
1358         /*
1359          * Now check the resulting inflights on the port, and the rx_dropped.
1360          */
1361         if (stats.port_inflight[0] != 0) {
1362                 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
1363                                 __func__);
1364                 rte_event_dev_dump(evdev, stdout);
1365                 return -1;
1366         }
1367         if (stats.port_rx_dropped[0] != 1) {
1368                 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
1369                 rte_event_dev_dump(evdev, stdout);
1370                 return -1;
1371         }
1372         /* each packet drop should only be counted in one place - port or dev */
1373         if (stats.rx_dropped != 0) {
1374                 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
1375                                 __func__);
1376                 rte_event_dev_dump(evdev, stdout);
1377                 return -1;
1378         }
1379
1380         cleanup(t);
1381         return 0;
1382 }
1383
1384 static int
1385 single_packet(struct test *t)
1386 {
1387         const uint32_t MAGIC_SEQN = 7321;
1388         struct rte_event ev;
1389         struct test_event_dev_stats stats;
1390         const int rx_enq = 0;
1391         const int wrk_enq = 2;
1392         int err;
1393
1394         /* Create instance with 4 ports */
1395         if (init(t, 1, 4) < 0 ||
1396                         create_ports(t, 4) < 0 ||
1397                         create_atomic_qids(t, 1) < 0) {
1398                 printf("%d: Error initializing device\n", __LINE__);
1399                 return -1;
1400         }
1401
1402         /* CQ mapping to QID */
1403         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1404         if (err != 1) {
1405                 printf("%d: error mapping lb qid\n", __LINE__);
1406                 cleanup(t);
1407                 return -1;
1408         }
1409
1410         if (rte_event_dev_start(evdev) < 0) {
1411                 printf("%d: Error with start call\n", __LINE__);
1412                 return -1;
1413         }
1414
1415         /************** Gen pkt and enqueue ****************/
1416         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1417         if (!arp) {
1418                 printf("%d: gen of pkt failed\n", __LINE__);
1419                 return -1;
1420         }
1421
1422         ev.op = RTE_EVENT_OP_NEW;
1423         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1424         ev.mbuf = arp;
1425         ev.queue_id = 0;
1426         ev.flow_id = 3;
1427         arp->seqn = MAGIC_SEQN;
1428
1429         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1430         if (err < 0) {
1431                 printf("%d: Failed to enqueue\n", __LINE__);
1432                 return -1;
1433         }
1434
1435         rte_event_schedule(evdev);
1436
1437         err = test_event_dev_stats_get(evdev, &stats);
1438         if (err) {
1439                 printf("%d: failed to get stats\n", __LINE__);
1440                 return -1;
1441         }
1442
1443         if (stats.rx_pkts != 1 ||
1444                         stats.tx_pkts != 1 ||
1445                         stats.port_inflight[wrk_enq] != 1) {
1446                 printf("%d: Sched core didn't handle pkt as expected\n",
1447                                 __LINE__);
1448                 rte_event_dev_dump(evdev, stdout);
1449                 return -1;
1450         }
1451
1452         uint32_t deq_pkts;
1453
1454         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
1455         if (deq_pkts < 1) {
1456                 printf("%d: Failed to deq\n", __LINE__);
1457                 return -1;
1458         }
1459
1460         err = test_event_dev_stats_get(evdev, &stats);
1461         if (err) {
1462                 printf("%d: failed to get stats\n", __LINE__);
1463                 return -1;
1464         }
1465
1466         err = test_event_dev_stats_get(evdev, &stats);
1467         if (ev.mbuf->seqn != MAGIC_SEQN) {
1468                 printf("%d: magic sequence number not dequeued\n", __LINE__);
1469                 return -1;
1470         }
1471
1472         rte_pktmbuf_free(ev.mbuf);
1473         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
1474         if (err < 0) {
1475                 printf("%d: Failed to enqueue\n", __LINE__);
1476                 return -1;
1477         }
1478         rte_event_schedule(evdev);
1479
1480         err = test_event_dev_stats_get(evdev, &stats);
1481         if (stats.port_inflight[wrk_enq] != 0) {
1482                 printf("%d: port inflight not correct\n", __LINE__);
1483                 return -1;
1484         }
1485
1486         cleanup(t);
1487         return 0;
1488 }
1489
1490 static int
1491 inflight_counts(struct test *t)
1492 {
1493         struct rte_event ev;
1494         struct test_event_dev_stats stats;
1495         const int rx_enq = 0;
1496         const int p1 = 1;
1497         const int p2 = 2;
1498         int err;
1499         int i;
1500
1501         /* Create instance with 4 ports */
1502         if (init(t, 2, 3) < 0 ||
1503                         create_ports(t, 3) < 0 ||
1504                         create_atomic_qids(t, 2) < 0) {
1505                 printf("%d: Error initializing device\n", __LINE__);
1506                 return -1;
1507         }
1508
1509         /* CQ mapping to QID */
1510         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
1511         if (err != 1) {
1512                 printf("%d: error mapping lb qid\n", __LINE__);
1513                 cleanup(t);
1514                 return -1;
1515         }
1516         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
1517         if (err != 1) {
1518                 printf("%d: error mapping lb qid\n", __LINE__);
1519                 cleanup(t);
1520                 return -1;
1521         }
1522
1523         if (rte_event_dev_start(evdev) < 0) {
1524                 printf("%d: Error with start call\n", __LINE__);
1525                 return -1;
1526         }
1527
1528         /************** FORWARD ****************/
1529 #define QID1_NUM 5
1530         for (i = 0; i < QID1_NUM; i++) {
1531                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1532
1533                 if (!arp) {
1534                         printf("%d: gen of pkt failed\n", __LINE__);
1535                         goto err;
1536                 }
1537
1538                 ev.queue_id =  t->qid[0];
1539                 ev.op = RTE_EVENT_OP_NEW;
1540                 ev.mbuf = arp;
1541                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1542                 if (err != 1) {
1543                         printf("%d: Failed to enqueue\n", __LINE__);
1544                         goto err;
1545                 }
1546         }
1547 #define QID2_NUM 3
1548         for (i = 0; i < QID2_NUM; i++) {
1549                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1550
1551                 if (!arp) {
1552                         printf("%d: gen of pkt failed\n", __LINE__);
1553                         goto err;
1554                 }
1555                 ev.queue_id =  t->qid[1];
1556                 ev.op = RTE_EVENT_OP_NEW;
1557                 ev.mbuf = arp;
1558                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1559                 if (err != 1) {
1560                         printf("%d: Failed to enqueue\n", __LINE__);
1561                         goto err;
1562                 }
1563         }
1564
1565         /* schedule */
1566         rte_event_schedule(evdev);
1567
1568         err = test_event_dev_stats_get(evdev, &stats);
1569         if (err) {
1570                 printf("%d: failed to get stats\n", __LINE__);
1571                 goto err;
1572         }
1573
1574         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
1575                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
1576                 printf("%d: Sched core didn't handle pkt as expected\n",
1577                                 __LINE__);
1578                 goto err;
1579         }
1580
1581         if (stats.port_inflight[p1] != QID1_NUM) {
1582                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
1583                                 __func__);
1584                 goto err;
1585         }
1586         if (stats.port_inflight[p2] != QID2_NUM) {
1587                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
1588                                 __func__);
1589                 goto err;
1590         }
1591
1592         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
1593         /* port 1 */
1594         struct rte_event events[QID1_NUM + QID2_NUM];
1595         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
1596                         RTE_DIM(events), 0);
1597
1598         if (deq_pkts != QID1_NUM) {
1599                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
1600                 goto err;
1601         }
1602         err = test_event_dev_stats_get(evdev, &stats);
1603         if (stats.port_inflight[p1] != QID1_NUM) {
1604                 printf("%d: port 1 inflight decrement after DEQ != 0\n",
1605                                 __LINE__);
1606                 goto err;
1607         }
1608         for (i = 0; i < QID1_NUM; i++) {
1609                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
1610                                 1);
1611                 if (err != 1) {
1612                         printf("%d: %s rte enqueue of inf release failed\n",
1613                                 __LINE__, __func__);
1614                         goto err;
1615                 }
1616         }
1617
1618         /*
1619          * As the scheduler core decrements inflights, it needs to run to
1620          * process packets to act on the drop messages
1621          */
1622         rte_event_schedule(evdev);
1623
1624         err = test_event_dev_stats_get(evdev, &stats);
1625         if (stats.port_inflight[p1] != 0) {
1626                 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
1627                 goto err;
1628         }
1629
1630         /* port2 */
1631         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
1632                         RTE_DIM(events), 0);
1633         if (deq_pkts != QID2_NUM) {
1634                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
1635                 goto err;
1636         }
1637         err = test_event_dev_stats_get(evdev, &stats);
1638         if (stats.port_inflight[p2] != QID2_NUM) {
1639                 printf("%d: port 1 inflight decrement after DEQ != 0\n",
1640                                 __LINE__);
1641                 goto err;
1642         }
1643         for (i = 0; i < QID2_NUM; i++) {
1644                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
1645                                 1);
1646                 if (err != 1) {
1647                         printf("%d: %s rte enqueue of inf release failed\n",
1648                                 __LINE__, __func__);
1649                         goto err;
1650                 }
1651         }
1652
1653         /*
1654          * As the scheduler core decrements inflights, it needs to run to
1655          * process packets to act on the drop messages
1656          */
1657         rte_event_schedule(evdev);
1658
1659         err = test_event_dev_stats_get(evdev, &stats);
1660         if (stats.port_inflight[p2] != 0) {
1661                 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
1662                 goto err;
1663         }
1664         cleanup(t);
1665         return 0;
1666
1667 err:
1668         rte_event_dev_dump(evdev, stdout);
1669         cleanup(t);
1670         return -1;
1671 }
1672
1673 static int
1674 parallel_basic(struct test *t, int check_order)
1675 {
1676         const uint8_t rx_port = 0;
1677         const uint8_t w1_port = 1;
1678         const uint8_t w3_port = 3;
1679         const uint8_t tx_port = 4;
1680         int err;
1681         int i;
1682         uint32_t deq_pkts, j;
1683         struct rte_mbuf *mbufs[3];
1684         struct rte_mbuf *mbufs_out[3];
1685         const uint32_t MAGIC_SEQN = 1234;
1686
1687         /* Create instance with 4 ports */
1688         if (init(t, 2, tx_port + 1) < 0 ||
1689                         create_ports(t, tx_port + 1) < 0 ||
1690                         (check_order ?  create_ordered_qids(t, 1) :
1691                                 create_unordered_qids(t, 1)) < 0 ||
1692                         create_directed_qids(t, 1, &tx_port)) {
1693                 printf("%d: Error initializing device\n", __LINE__);
1694                 return -1;
1695         }
1696
1697         /*
1698          * CQ mapping to QID
1699          * We need three ports, all mapped to the same ordered qid0. Then we'll
1700          * take a packet out to each port, re-enqueue in reverse order,
1701          * then make sure the reordering has taken place properly when we
1702          * dequeue from the tx_port.
1703          *
1704          * Simplified test setup diagram:
1705          *
1706          * rx_port        w1_port
1707          *        \     /         \
1708          *         qid0 - w2_port - qid1
1709          *              \         /     \
1710          *                w3_port        tx_port
1711          */
1712         /* CQ mapping to QID for LB ports (directed mapped on create) */
1713         for (i = w1_port; i <= w3_port; i++) {
1714                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
1715                                 1);
1716                 if (err != 1) {
1717                         printf("%d: error mapping lb qid\n", __LINE__);
1718                         cleanup(t);
1719                         return -1;
1720                 }
1721         }
1722
1723         if (rte_event_dev_start(evdev) < 0) {
1724                 printf("%d: Error with start call\n", __LINE__);
1725                 return -1;
1726         }
1727
1728         /* Enqueue 3 packets to the rx port */
1729         for (i = 0; i < 3; i++) {
1730                 struct rte_event ev;
1731                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
1732                 if (!mbufs[i]) {
1733                         printf("%d: gen of pkt failed\n", __LINE__);
1734                         return -1;
1735                 }
1736
1737                 ev.queue_id = t->qid[0];
1738                 ev.op = RTE_EVENT_OP_NEW;
1739                 ev.mbuf = mbufs[i];
1740                 mbufs[i]->seqn = MAGIC_SEQN + i;
1741
1742                 /* generate pkt and enqueue */
1743                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
1744                 if (err != 1) {
1745                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
1746                                         __LINE__, i, err);
1747                         return -1;
1748                 }
1749         }
1750
1751         rte_event_schedule(evdev);
1752
1753         /* use extra slot to make logic in loops easier */
1754         struct rte_event deq_ev[w3_port + 1];
1755
1756         /* Dequeue the 3 packets, one from each worker port */
1757         for (i = w1_port; i <= w3_port; i++) {
1758                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
1759                                 &deq_ev[i], 1, 0);
1760                 if (deq_pkts != 1) {
1761                         printf("%d: Failed to deq\n", __LINE__);
1762                         rte_event_dev_dump(evdev, stdout);
1763                         return -1;
1764                 }
1765         }
1766
1767         /* Enqueue each packet in reverse order, flushing after each one */
1768         for (i = w3_port; i >= w1_port; i--) {
1769
1770                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
1771                 deq_ev[i].queue_id = t->qid[1];
1772                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
1773                 if (err != 1) {
1774                         printf("%d: Failed to enqueue\n", __LINE__);
1775                         return -1;
1776                 }
1777         }
1778         rte_event_schedule(evdev);
1779
1780         /* dequeue from the tx ports, we should get 3 packets */
1781         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
1782                         3, 0);
1783
1784         /* Check to see if we've got all 3 packets */
1785         if (deq_pkts != 3) {
1786                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
1787                         __LINE__, deq_pkts, tx_port);
1788                 rte_event_dev_dump(evdev, stdout);
1789                 return 1;
1790         }
1791
1792         /* Check to see if the sequence numbers are in expected order */
1793         if (check_order) {
1794                 for (j = 0 ; j < deq_pkts ; j++) {
1795                         if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
1796                                 printf(
1797                                         "%d: Incorrect sequence number(%d) from port %d\n",
1798                                         __LINE__, mbufs_out[j]->seqn, tx_port);
1799                                 return -1;
1800                         }
1801                 }
1802         }
1803
1804         /* Destroy the instance */
1805         cleanup(t);
1806         return 0;
1807 }
1808
/* Exercise parallel_basic() with ordered qids: output sequence numbers
 * must be restored to their original order at the TX port.
 */
static int
ordered_basic(struct test *t)
{
	const int check_order = 1;

	return parallel_basic(t, check_order);
}
1814
/* Exercise parallel_basic() with unordered qids: delivery of all packets
 * is checked, but not their order.
 */
static int
unordered_basic(struct test *t)
{
	const int check_order = 0;

	return parallel_basic(t, check_order);
}
1820
/* mbuf pool shared across test runs; created on first invocation of
 * test_sw_eventdev() and intentionally kept alive so re-runs can reuse it.
 */
static struct rte_mempool *eventdev_func_mempool;
1822
1823 static int
1824 test_sw_eventdev(void)
1825 {
1826         struct test *t = malloc(sizeof(struct test));
1827         int ret;
1828
1829         /* manually initialize the op, older gcc's complain on static
1830          * initialization of struct elements that are a bitfield.
1831          */
1832         release_ev.op = RTE_EVENT_OP_RELEASE;
1833
1834         const char *eventdev_name = "event_sw0";
1835         evdev = rte_event_dev_get_dev_id(eventdev_name);
1836         if (evdev < 0) {
1837                 printf("%d: Eventdev %s not found - creating.\n",
1838                                 __LINE__, eventdev_name);
1839                 if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
1840                         printf("Error creating eventdev\n");
1841                         return -1;
1842                 }
1843                 evdev = rte_event_dev_get_dev_id(eventdev_name);
1844                 if (evdev < 0) {
1845                         printf("Error finding newly created eventdev\n");
1846                         return -1;
1847                 }
1848         }
1849
1850         /* Only create mbuf pool once, reuse for each test run */
1851         if (!eventdev_func_mempool) {
1852                 eventdev_func_mempool = rte_pktmbuf_pool_create(
1853                                 "EVENTDEV_SW_SA_MBUF_POOL",
1854                                 (1<<12), /* 4k buffers */
1855                                 32 /*MBUF_CACHE_SIZE*/,
1856                                 0,
1857                                 512, /* use very small mbufs */
1858                                 rte_socket_id());
1859                 if (!eventdev_func_mempool) {
1860                         printf("ERROR creating mempool\n");
1861                         return -1;
1862                 }
1863         }
1864         t->mbuf_pool = eventdev_func_mempool;
1865
1866         printf("*** Running Single Directed Packet test...\n");
1867         ret = test_single_directed_packet(t);
1868         if (ret != 0) {
1869                 printf("ERROR - Single Directed Packet test FAILED.\n");
1870                 return ret;
1871         }
1872         printf("*** Running Single Load Balanced Packet test...\n");
1873         ret = single_packet(t);
1874         if (ret != 0) {
1875                 printf("ERROR - Single Packet test FAILED.\n");
1876                 return ret;
1877         }
1878         printf("*** Running Unordered Basic test...\n");
1879         ret = unordered_basic(t);
1880         if (ret != 0) {
1881                 printf("ERROR -  Unordered Basic test FAILED.\n");
1882                 return ret;
1883         }
1884         printf("*** Running Ordered Basic test...\n");
1885         ret = ordered_basic(t);
1886         if (ret != 0) {
1887                 printf("ERROR -  Ordered Basic test FAILED.\n");
1888                 return ret;
1889         }
1890         printf("*** Running Burst Packets test...\n");
1891         ret = burst_packets(t);
1892         if (ret != 0) {
1893                 printf("ERROR - Burst Packets test FAILED.\n");
1894                 return ret;
1895         }
1896         printf("*** Running Load Balancing test...\n");
1897         ret = load_balancing(t);
1898         if (ret != 0) {
1899                 printf("ERROR - Load Balancing test FAILED.\n");
1900                 return ret;
1901         }
1902         printf("*** Running Prioritized Directed test...\n");
1903         ret = test_priority_directed(t);
1904         if (ret != 0) {
1905                 printf("ERROR - Prioritized Directed test FAILED.\n");
1906                 return ret;
1907         }
1908         printf("*** Running Prioritized Atomic test...\n");
1909         ret = test_priority_atomic(t);
1910         if (ret != 0) {
1911                 printf("ERROR - Prioritized Atomic test FAILED.\n");
1912                 return ret;
1913         }
1914
1915         printf("*** Running Prioritized Ordered test...\n");
1916         ret = test_priority_ordered(t);
1917         if (ret != 0) {
1918                 printf("ERROR - Prioritized Ordered test FAILED.\n");
1919                 return ret;
1920         }
1921         printf("*** Running Prioritized Unordered test...\n");
1922         ret = test_priority_unordered(t);
1923         if (ret != 0) {
1924                 printf("ERROR - Prioritized Unordered test FAILED.\n");
1925                 return ret;
1926         }
1927         printf("*** Running Invalid QID test...\n");
1928         ret = invalid_qid(t);
1929         if (ret != 0) {
1930                 printf("ERROR - Invalid QID test FAILED.\n");
1931                 return ret;
1932         }
1933         printf("*** Running Load Balancing History test...\n");
1934         ret = load_balancing_history(t);
1935         if (ret != 0) {
1936                 printf("ERROR - Load Balancing History test FAILED.\n");
1937                 return ret;
1938         }
1939         printf("*** Running Inflight Count test...\n");
1940         ret = inflight_counts(t);
1941         if (ret != 0) {
1942                 printf("ERROR - Inflight Count test FAILED.\n");
1943                 return ret;
1944         }
1945         printf("*** Running Abuse Inflights test...\n");
1946         ret = abuse_inflights(t);
1947         if (ret != 0) {
1948                 printf("ERROR - Abuse Inflights test FAILED.\n");
1949                 return ret;
1950         }
1951         printf("*** Running QID Priority test...\n");
1952         ret = qid_priorities(t);
1953         if (ret != 0) {
1954                 printf("ERROR - QID Priority test FAILED.\n");
1955                 return ret;
1956         }
1957         printf("*** Running Ordered Reconfigure test...\n");
1958         ret = ordered_reconfigure(t);
1959         if (ret != 0) {
1960                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
1961                 return ret;
1962         }
1963         printf("*** Running Port LB Single Reconfig test...\n");
1964         ret = port_single_lb_reconfig(t);
1965         if (ret != 0) {
1966                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
1967                 return ret;
1968         }
1969         printf("*** Running Port Reconfig Credits test...\n");
1970         ret = port_reconfig_credits(t);
1971         if (ret != 0) {
1972                 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
1973                 return ret;
1974         }
1975         /*
1976          * Free test instance, leaving mempool initialized, and a pointer to it
1977          * in static eventdev_func_mempool, as it is re-used on re-runs
1978          */
1979         free(t);
1980
1981         return 0;
1982 }
1983
/* Expose the suite to the test framework as "eventdev_sw_autotest". */
REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);