1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/queue.h>
11
12 #include <rte_memory.h>
13 #include <rte_launch.h>
14 #include <rte_eal.h>
15 #include <rte_per_lcore.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_ethdev.h>
19 #include <rte_cycles.h>
20 #include <rte_eventdev.h>
21 #include <rte_pause.h>
22 #include <rte_service.h>
23 #include <rte_service_component.h>
24 #include <rte_bus_vdev.h>
25
26 #include "sw_evdev.h"
27
28 #define MAX_PORTS 16
29 #define MAX_QIDS 16
30 #define NUM_PACKETS (1<<18)
31 #define DEQUEUE_DEPTH 128
32
33 static int evdev;
34
35 struct test {
36         struct rte_mempool *mbuf_pool;
37         uint8_t port[MAX_PORTS];
38         uint8_t qid[MAX_QIDS];
39         int nb_qids;
40         uint32_t service_id;
41 };
42
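/* counter_field
 * Per-mbuf counter used by the worker-based tests later in this file. The
 * dynamic field offset below is presumably registered during test setup
 * (e.g. via rte_mbuf_dynfield_register(), outside this excerpt);
 * counter_field() merely resolves a pointer to that field in a given mbuf.
 */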
43 typedef uint8_t counter_dynfield_t;
44 static int counter_dynfield_offset = -1;
45
46 static inline counter_dynfield_t *
47 counter_field(struct rte_mbuf *mbuf)
48 {
49         return RTE_MBUF_DYNFIELD(mbuf,
50                         counter_dynfield_offset, counter_dynfield_t *);
51 }
52
53 static struct rte_event release_ev;
54
55 static inline struct rte_mbuf *
56 rte_gen_arp(int portid, struct rte_mempool *mp)
57 {
58         /*
59          * len = 14 + 46
60          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
61          */
62         static const uint8_t arp_request[] = {
63                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
64                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
65                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
66                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
67                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
68                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
69                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
70                 0x00, 0x00, 0x00, 0x00
71         };
72         struct rte_mbuf *m;
73         int pkt_len = sizeof(arp_request) - 1;
74
75         m = rte_pktmbuf_alloc(mp);
76         if (!m)
77                 return 0;
78
79         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
80                 arp_request, pkt_len);
81         rte_pktmbuf_pkt_len(m) = pkt_len;
82         rte_pktmbuf_data_len(m) = pkt_len;
83
84         RTE_SET_USED(portid);
85
86         return m;
87 }
88
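/* xstats_print
 * Debug helper that prints the available device, port and queue xstats to
 * stdout. Note the names are queried for port/queue 0 while the values are
 * read for port/queue 1; the output is informational only, nothing is
 * validated here.
 */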
89 static void
90 xstats_print(void)
91 {
92         const uint32_t XSTATS_MAX = 1024;
93         uint32_t i;
94         uint32_t ids[XSTATS_MAX];
95         uint64_t values[XSTATS_MAX];
96         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
97
98         for (i = 0; i < XSTATS_MAX; i++)
99                 ids[i] = i;
100
101         /* Device names / values */
102         int ret = rte_event_dev_xstats_names_get(evdev,
103                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
104                                         xstats_names, ids, XSTATS_MAX);
105         if (ret < 0) {
106                 printf("%d: xstats names get() returned error\n",
107                         __LINE__);
108                 return;
109         }
110         ret = rte_event_dev_xstats_get(evdev,
111                                         RTE_EVENT_DEV_XSTATS_DEVICE,
112                                         0, ids, values, ret);
113         if (ret > (signed int)XSTATS_MAX)
114                 printf("%s %d: more xstats available than space\n",
115                                 __func__, __LINE__);
116         for (i = 0; (signed int)i < ret; i++) {
117                 printf("%d : %s : %"PRIu64"\n",
118                                 i, xstats_names[i].name, values[i]);
119         }
120
121         /* Port names / values */
122         ret = rte_event_dev_xstats_names_get(evdev,
123                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
124                                         xstats_names, ids, XSTATS_MAX);
125         ret = rte_event_dev_xstats_get(evdev,
126                                         RTE_EVENT_DEV_XSTATS_PORT, 1,
127                                         ids, values, ret);
128         if (ret > (signed int)XSTATS_MAX)
129                 printf("%s %d: more xstats available than space\n",
130                                 __func__, __LINE__);
131         for (i = 0; (signed int)i < ret; i++) {
132                 printf("%d : %s : %"PRIu64"\n",
133                                 i, xstats_names[i].name, values[i]);
134         }
135
136         /* Queue names / values */
137         ret = rte_event_dev_xstats_names_get(evdev,
138                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
139                                         xstats_names, ids, XSTATS_MAX);
140         ret = rte_event_dev_xstats_get(evdev,
141                                         RTE_EVENT_DEV_XSTATS_QUEUE,
142                                         1, ids, values, ret);
143         if (ret > (signed int)XSTATS_MAX)
144                 printf("%s %d: more xstats available than space\n",
145                                 __func__, __LINE__);
146         for (i = 0; (signed int)i < ret; i++) {
147                 printf("%d : %s : %"PRIu64"\n",
148                                 i, xstats_names[i].name, values[i]);
149         }
150 }
151
152 /* initialization and config */
153 static inline int
154 init(struct test *t, int nb_queues, int nb_ports)
155 {
156         struct rte_event_dev_config config = {
157                         .nb_event_queues = nb_queues,
158                         .nb_event_ports = nb_ports,
159                         .nb_event_queue_flows = 1024,
160                         .nb_events_limit = 4096,
161                         .nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
162                         .nb_event_port_enqueue_depth = 128,
163         };
164         int ret;
165
166         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
167
168         memset(t, 0, sizeof(*t));
169         t->mbuf_pool = temp;
170
171         ret = rte_event_dev_configure(evdev, &config);
172         if (ret < 0)
173                 printf("%d: Error configuring device\n", __LINE__);
174         return ret;
175 }
176
177 static inline int
178 create_ports(struct test *t, int num_ports)
179 {
180         int i;
181         static const struct rte_event_port_conf conf = {
182                         .new_event_threshold = 1024,
183                         .dequeue_depth = 32,
184                         .enqueue_depth = 64,
185         };
186         if (num_ports > MAX_PORTS)
187                 return -1;
188
189         for (i = 0; i < num_ports; i++) {
190                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
191                         printf("Error setting up port %d\n", i);
192                         return -1;
193                 }
194                 t->port[i] = i;
195         }
196
197         return 0;
198 }
199
200 static inline int
201 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
202 {
203         int i;
204
205         /* Q creation */
206         const struct rte_event_queue_conf conf = {
207                         .schedule_type = flags,
208                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
209                         .nb_atomic_flows = 1024,
210                         .nb_atomic_order_sequences = 1024,
211         };
212
213         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
214                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
215                         printf("%d: error creating qid %d\n", __LINE__, i);
216                         return -1;
217                 }
218                 t->qid[i] = i;
219         }
220         t->nb_qids += num_qids;
221         if (t->nb_qids > MAX_QIDS)
222                 return -1;
223
224         return 0;
225 }
226
227 static inline int
228 create_atomic_qids(struct test *t, int num_qids)
229 {
230         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
231 }
232
233 static inline int
234 create_ordered_qids(struct test *t, int num_qids)
235 {
236         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
237 }
238
239
240 static inline int
241 create_unordered_qids(struct test *t, int num_qids)
242 {
243         return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
244 }
245
246 static inline int
247 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
248 {
249         int i;
250
251         /* Q creation */
252         static const struct rte_event_queue_conf conf = {
253                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
254                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
255         };
256
257         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
258                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
259                         printf("%d: error creating qid %d\n", __LINE__, i);
260                         return -1;
261                 }
262                 t->qid[i] = i;
263
264                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
265                                 &t->qid[i], NULL, 1) != 1) {
266                         printf("%d: error creating link for qid %d\n",
267                                         __LINE__, i);
268                         return -1;
269                 }
270         }
271         t->nb_qids += num_qids;
272         if (t->nb_qids > MAX_QIDS)
273                 return -1;
274
275         return 0;
276 }
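/* Illustrative sketch (not itself a test): the tests below combine the
 * helpers above in a common pattern, e.g. for one atomic queue and one port
 * ("t" is supplied by the caller with mbuf_pool already set, error handling
 * omitted):
 *
 *      init(t, 1, 1);                  // 1 queue, 1 port
 *      create_ports(t, 1);
 *      create_atomic_qids(t, 1);
 *      rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
 *      rte_event_dev_start(evdev);
 *      ... enqueue / run scheduler service / dequeue ...
 *      cleanup(t);
 */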
277
278 /* destruction */
279 static inline int
280 cleanup(struct test *t __rte_unused)
281 {
282         rte_event_dev_stop(evdev);
283         rte_event_dev_close(evdev);
284         return 0;
285 }
286
287 struct test_event_dev_stats {
288         uint64_t rx_pkts;       /**< Total packets received */
289         uint64_t rx_dropped;    /**< Total packets dropped (Eg Invalid QID) */
290         uint64_t tx_pkts;       /**< Total packets transmitted */
291
292         /** Packets received on this port */
293         uint64_t port_rx_pkts[MAX_PORTS];
294         /** Packets dropped on this port */
295         uint64_t port_rx_dropped[MAX_PORTS];
296         /** Packets inflight on this port */
297         uint64_t port_inflight[MAX_PORTS];
298         /** Packets transmitted on this port */
299         uint64_t port_tx_pkts[MAX_PORTS];
300         /** Packets received on this qid */
301         uint64_t qid_rx_pkts[MAX_QIDS];
302         /** Packets dropped on this qid */
303         uint64_t qid_rx_dropped[MAX_QIDS];
304         /** Packets transmitted on this qid */
305         uint64_t qid_tx_pkts[MAX_QIDS];
306 };
307
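/* test_event_dev_stats_get
 * Fills a test_event_dev_stats structure by querying the SW PMD xstats by
 * name ("dev_rx", "port_N_rx", "qid_N_tx", ...). The static *_ids arrays
 * receive the xstat ids reported for each name; the function always
 * returns 0.
 */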
308 static inline int
309 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
310 {
311         static uint32_t i;
312         static uint32_t total_ids[3]; /* rx, tx and drop */
313         static uint32_t port_rx_pkts_ids[MAX_PORTS];
314         static uint32_t port_rx_dropped_ids[MAX_PORTS];
315         static uint32_t port_inflight_ids[MAX_PORTS];
316         static uint32_t port_tx_pkts_ids[MAX_PORTS];
317         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
318         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
319         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
320
321
322         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
323                         "dev_rx", &total_ids[0]);
324         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
325                         "dev_drop", &total_ids[1]);
326         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
327                         "dev_tx", &total_ids[2]);
328         for (i = 0; i < MAX_PORTS; i++) {
329                 char name[32];
330                 snprintf(name, sizeof(name), "port_%u_rx", i);
331                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
332                                 dev_id, name, &port_rx_pkts_ids[i]);
333                 snprintf(name, sizeof(name), "port_%u_drop", i);
334                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
335                                 dev_id, name, &port_rx_dropped_ids[i]);
336                 snprintf(name, sizeof(name), "port_%u_inflight", i);
337                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
338                                 dev_id, name, &port_inflight_ids[i]);
339                 snprintf(name, sizeof(name), "port_%u_tx", i);
340                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
341                                 dev_id, name, &port_tx_pkts_ids[i]);
342         }
343         for (i = 0; i < MAX_QIDS; i++) {
344                 char name[32];
345                 snprintf(name, sizeof(name), "qid_%u_rx", i);
346                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
347                                 dev_id, name, &qid_rx_pkts_ids[i]);
348                 snprintf(name, sizeof(name), "qid_%u_drop", i);
349                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
350                                 dev_id, name, &qid_rx_dropped_ids[i]);
351                 snprintf(name, sizeof(name), "qid_%u_tx", i);
352                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
353                                 dev_id, name, &qid_tx_pkts_ids[i]);
354         }
355
356         return 0;
357 }
358
359 /* run_prio_packet_test
360  * This performs a basic packet priority check on the test instance passed in.
361  * It is factored out of the main priority tests as the same tests must be
362  * performed to ensure prioritization of each type of QID.
363  *
364  * Requirements:
365  *  - An initialized test structure, including mempool
366  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
367  *  - t->qid[0] is the QID to be tested
368  *  - if LB QID, the CQ must be mapped to the QID.
369  */
370 static int
371 run_prio_packet_test(struct test *t)
372 {
373         int err;
374         const uint32_t MAGIC_SEQN[] = {4711, 1234};
375         const uint32_t PRIORITY[] = {
376                 RTE_EVENT_DEV_PRIORITY_NORMAL,
377                 RTE_EVENT_DEV_PRIORITY_HIGHEST
378         };
379         unsigned int i;
380         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
381                 /* generate pkt and enqueue */
382                 struct rte_event ev;
383                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
384                 if (!arp) {
385                         printf("%d: gen of pkt failed\n", __LINE__);
386                         return -1;
387                 }
388                 *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN[i];
389
390                 ev = (struct rte_event){
391                         .priority = PRIORITY[i],
392                         .op = RTE_EVENT_OP_NEW,
393                         .queue_id = t->qid[0],
394                         .mbuf = arp
395                 };
396                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
397                 if (err != 1) {
398                         printf("%d: error failed to enqueue\n", __LINE__);
399                         return -1;
400                 }
401         }
402
403         rte_service_run_iter_on_app_lcore(t->service_id, 1);
404
405         struct test_event_dev_stats stats;
406         err = test_event_dev_stats_get(evdev, &stats);
407         if (err) {
408                 printf("%d: error failed to get stats\n", __LINE__);
409                 return -1;
410         }
411
412         if (stats.port_rx_pkts[t->port[0]] != 2) {
413                 printf("%d: error stats incorrect for port 0\n",
414                                 __LINE__);
415                 rte_event_dev_dump(evdev, stdout);
416                 return -1;
417         }
418
419         struct rte_event ev, ev2;
420         uint32_t deq_pkts;
421         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
422         if (deq_pkts != 1) {
423                 printf("%d: error failed to deq\n", __LINE__);
424                 rte_event_dev_dump(evdev, stdout);
425                 return -1;
426         }
427         if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN[1]) {
428                 printf("%d: first packet out not highest priority\n",
429                                 __LINE__);
430                 rte_event_dev_dump(evdev, stdout);
431                 return -1;
432         }
433         rte_pktmbuf_free(ev.mbuf);
434
435         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
436         if (deq_pkts != 1) {
437                 printf("%d: error failed to deq\n", __LINE__);
438                 rte_event_dev_dump(evdev, stdout);
439                 return -1;
440         }
441         if (*rte_event_pmd_selftest_seqn(ev2.mbuf) != MAGIC_SEQN[0]) {
442                 printf("%d: second packet out not lower priority\n",
443                                 __LINE__);
444                 rte_event_dev_dump(evdev, stdout);
445                 return -1;
446         }
447         rte_pktmbuf_free(ev2.mbuf);
448
449         cleanup(t);
450         return 0;
451 }
452
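/* test_single_directed_packet
 * Enqueues a single NEW event on directed queue 2 via port 0 and verifies it
 * can be dequeued from port 2 with its selftest sequence number intact,
 * checking the port rx statistics along the way.
 */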
453 static int
454 test_single_directed_packet(struct test *t)
455 {
456         const int rx_enq = 0;
457         const int wrk_enq = 2;
458         int err;
459
460         /* Create instance with 3 directed QIDs going to 3 ports */
461         if (init(t, 3, 3) < 0 ||
462                         create_ports(t, 3) < 0 ||
463                         create_directed_qids(t, 3, t->port) < 0)
464                 return -1;
465
466         if (rte_event_dev_start(evdev) < 0) {
467                 printf("%d: Error with start call\n", __LINE__);
468                 return -1;
469         }
470
471         /************** FORWARD ****************/
472         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
473         struct rte_event ev = {
474                         .op = RTE_EVENT_OP_NEW,
475                         .queue_id = wrk_enq,
476                         .mbuf = arp,
477         };
478
479         if (!arp) {
480                 printf("%d: gen of pkt failed\n", __LINE__);
481                 return -1;
482         }
483
484         const uint32_t MAGIC_SEQN = 4711;
485         *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
486
487         /* generate pkt and enqueue */
488         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
489         if (err != 1) {
490                 printf("%d: error failed to enqueue\n", __LINE__);
491                 return -1;
492         }
493
494         /* Run schedule() as directed packets may need to be re-ordered */
495         rte_service_run_iter_on_app_lcore(t->service_id, 1);
496
497         struct test_event_dev_stats stats;
498         err = test_event_dev_stats_get(evdev, &stats);
499         if (err) {
500                 printf("%d: error failed to get stats\n", __LINE__);
501                 return -1;
502         }
503
504         if (stats.port_rx_pkts[rx_enq] != 1) {
505                 printf("%d: error stats incorrect for directed port\n",
506                                 __LINE__);
507                 return -1;
508         }
509
510         uint32_t deq_pkts;
511         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
512         if (deq_pkts != 1) {
513                 printf("%d: error failed to deq\n", __LINE__);
514                 return -1;
515         }
516
517         err = test_event_dev_stats_get(evdev, &stats);
518         if (stats.port_rx_pkts[wrk_enq] != 0 &&
519                         stats.port_rx_pkts[wrk_enq] != 1) {
520                 printf("%d: error directed stats post-dequeue\n", __LINE__);
521                 return -1;
522         }
523
524         if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
525                 printf("%d: error magic sequence number not dequeued\n",
526                                 __LINE__);
527                 return -1;
528         }
529
530         rte_pktmbuf_free(ev.mbuf);
531         cleanup(t);
532         return 0;
533 }
534
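/* test_directed_forward_credits
 * Repeatedly enqueues and dequeues the same event on a single directed
 * queue/port pair, re-submitting it as OP_FORWARD after the first iteration.
 * If FORWARD operations on a directed port leaked credits, an enqueue would
 * eventually fail before the 1000 iterations complete.
 */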
535 static int
536 test_directed_forward_credits(struct test *t)
537 {
538         uint32_t i;
539         int32_t err;
540
541         if (init(t, 1, 1) < 0 ||
542                         create_ports(t, 1) < 0 ||
543                         create_directed_qids(t, 1, t->port) < 0)
544                 return -1;
545
546         if (rte_event_dev_start(evdev) < 0) {
547                 printf("%d: Error with start call\n", __LINE__);
548                 return -1;
549         }
550
551         struct rte_event ev = {
552                         .op = RTE_EVENT_OP_NEW,
553                         .queue_id = 0,
554         };
555
556         for (i = 0; i < 1000; i++) {
557                 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
558                 if (err != 1) {
559                         printf("%d: error failed to enqueue\n", __LINE__);
560                         return -1;
561                 }
562                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
563
564                 uint32_t deq_pkts;
565                 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
566                 if (deq_pkts != 1) {
567                         printf("%d: error failed to deq\n", __LINE__);
568                         return -1;
569                 }
570
571                 /* re-write event to be a forward, and continue looping it */
572                 ev.op = RTE_EVENT_OP_FORWARD;
573         }
574
575         cleanup(t);
576         return 0;
577 }
578
579
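/* The four test_priority_* cases below are thin wrappers that configure one
 * port plus one queue of the respective scheduling type (directed, atomic,
 * ordered or parallel), link it where required, start the device and then
 * run the common run_prio_packet_test() check.
 */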
580 static int
581 test_priority_directed(struct test *t)
582 {
583         if (init(t, 1, 1) < 0 ||
584                         create_ports(t, 1) < 0 ||
585                         create_directed_qids(t, 1, t->port) < 0) {
586                 printf("%d: Error initializing device\n", __LINE__);
587                 return -1;
588         }
589
590         if (rte_event_dev_start(evdev) < 0) {
591                 printf("%d: Error with start call\n", __LINE__);
592                 return -1;
593         }
594
595         return run_prio_packet_test(t);
596 }
597
598 static int
599 test_priority_atomic(struct test *t)
600 {
601         if (init(t, 1, 1) < 0 ||
602                         create_ports(t, 1) < 0 ||
603                         create_atomic_qids(t, 1) < 0) {
604                 printf("%d: Error initializing device\n", __LINE__);
605                 return -1;
606         }
607
608         /* map the QID */
609         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
610                 printf("%d: error mapping qid to port\n", __LINE__);
611                 return -1;
612         }
613         if (rte_event_dev_start(evdev) < 0) {
614                 printf("%d: Error with start call\n", __LINE__);
615                 return -1;
616         }
617
618         return run_prio_packet_test(t);
619 }
620
621 static int
622 test_priority_ordered(struct test *t)
623 {
624         if (init(t, 1, 1) < 0 ||
625                         create_ports(t, 1) < 0 ||
626                         create_ordered_qids(t, 1) < 0) {
627                 printf("%d: Error initializing device\n", __LINE__);
628                 return -1;
629         }
630
631         /* map the QID */
632         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
633                 printf("%d: error mapping qid to port\n", __LINE__);
634                 return -1;
635         }
636         if (rte_event_dev_start(evdev) < 0) {
637                 printf("%d: Error with start call\n", __LINE__);
638                 return -1;
639         }
640
641         return run_prio_packet_test(t);
642 }
643
644 static int
645 test_priority_unordered(struct test *t)
646 {
647         if (init(t, 1, 1) < 0 ||
648                         create_ports(t, 1) < 0 ||
649                         create_unordered_qids(t, 1) < 0) {
650                 printf("%d: Error initializing device\n", __LINE__);
651                 return -1;
652         }
653
654         /* map the QID */
655         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
656                 printf("%d: error mapping qid to port\n", __LINE__);
657                 return -1;
658         }
659         if (rte_event_dev_start(evdev) < 0) {
660                 printf("%d: Error with start call\n", __LINE__);
661                 return -1;
662         }
663
664         return run_prio_packet_test(t);
665 }
666
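/* burst_packets
 * Enqueues NUM_PKTS (2) NEW events from port 0, alternating between queue 0
 * and queue 1, then verifies via the stats that the scheduler processed all
 * of them and that half can be dequeued from each of the two linked ports.
 */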
667 static int
668 burst_packets(struct test *t)
669 {
670         /************** CONFIG ****************/
671         uint32_t i;
672         int err;
673         int ret;
674
675         /* Create instance with 2 ports and 2 queues */
676         if (init(t, 2, 2) < 0 ||
677                         create_ports(t, 2) < 0 ||
678                         create_atomic_qids(t, 2) < 0) {
679                 printf("%d: Error initializing device\n", __LINE__);
680                 return -1;
681         }
682
683         /* CQ mapping to QID */
684         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
685         if (ret != 1) {
686                 printf("%d: error mapping lb qid0\n", __LINE__);
687                 return -1;
688         }
689         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
690         if (ret != 1) {
691                 printf("%d: error mapping lb qid1\n", __LINE__);
692                 return -1;
693         }
694
695         if (rte_event_dev_start(evdev) < 0) {
696                 printf("%d: Error with start call\n", __LINE__);
697                 return -1;
698         }
699
700         /************** FORWARD ****************/
701         const uint32_t rx_port = 0;
702         const uint32_t NUM_PKTS = 2;
703
704         for (i = 0; i < NUM_PKTS; i++) {
705                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
706                 if (!arp) {
707                         printf("%d: error generating pkt\n", __LINE__);
708                         return -1;
709                 }
710
711                 struct rte_event ev = {
712                                 .op = RTE_EVENT_OP_NEW,
713                                 .queue_id = i % 2,
714                                 .flow_id = i % 3,
715                                 .mbuf = arp,
716                 };
717                 /* generate pkt and enqueue */
718                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
719                 if (err != 1) {
720                         printf("%d: Failed to enqueue\n", __LINE__);
721                         return -1;
722                 }
723         }
724         rte_service_run_iter_on_app_lcore(t->service_id, 1);
725
726         /* Check stats to verify all NUM_PKTS reached the sched core */
727         struct test_event_dev_stats stats;
728
729         err = test_event_dev_stats_get(evdev, &stats);
730         if (err) {
731                 printf("%d: failed to get stats\n", __LINE__);
732                 return -1;
733         }
734         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
735                 printf("%d: Sched core didn't receive all %d pkts\n",
736                                 __LINE__, NUM_PKTS);
737                 rte_event_dev_dump(evdev, stdout);
738                 return -1;
739         }
740
741         uint32_t deq_pkts;
742         int p;
743
744         deq_pkts = 0;
745         /******** DEQ from port 0 (QID 0) *******/
746         do {
747                 struct rte_event ev;
748                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
749                 deq_pkts += p;
750                 rte_pktmbuf_free(ev.mbuf);
751         } while (p);
752
753         if (deq_pkts != NUM_PKTS/2) {
754                 printf("%d: Half of NUM_PKTS didn't arrive at port 0\n",
755                                 __LINE__);
756                 return -1;
757         }
758
759         /******** DEQ from port 1 (QID 1) *******/
760         deq_pkts = 0;
761         do {
762                 struct rte_event ev;
763                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
764                 deq_pkts += p;
765                 rte_pktmbuf_free(ev.mbuf);
766         } while (p);
767         if (deq_pkts != NUM_PKTS/2) {
768                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
769                                 __LINE__);
770                 return -1;
771         }
772
773         cleanup(t);
774         return 0;
775 }
776
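/* abuse_inflights
 * Enqueues the global release_ev (presumably initialized as an
 * RTE_EVENT_OP_RELEASE operation during test setup, outside this excerpt)
 * while nothing is in flight, then runs the scheduler; rx/tx counts and the
 * worker port's inflight count must all remain zero.
 */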
777 static int
778 abuse_inflights(struct test *t)
779 {
780         const int rx_enq = 0;
781         const int wrk_enq = 2;
782         int err;
783
784         /* Create instance with 4 ports */
785         if (init(t, 1, 4) < 0 ||
786                         create_ports(t, 4) < 0 ||
787                         create_atomic_qids(t, 1) < 0) {
788                 printf("%d: Error initializing device\n", __LINE__);
789                 return -1;
790         }
791
792         /* CQ mapping to QID */
793         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
794         if (err != 1) {
795                 printf("%d: error mapping lb qid\n", __LINE__);
796                 cleanup(t);
797                 return -1;
798         }
799
800         if (rte_event_dev_start(evdev) < 0) {
801                 printf("%d: Error with start call\n", __LINE__);
802                 return -1;
803         }
804
805         /* Enqueue op only */
806         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
807         if (err != 1) {
808                 printf("%d: Failed to enqueue\n", __LINE__);
809                 return -1;
810         }
811
812         /* schedule */
813         rte_service_run_iter_on_app_lcore(t->service_id, 1);
814
815         struct test_event_dev_stats stats;
816
817         err = test_event_dev_stats_get(evdev, &stats);
818         if (err) {
819                 printf("%d: failed to get stats\n", __LINE__);
820                 return -1;
821         }
822
823         if (stats.rx_pkts != 0 ||
824                         stats.tx_pkts != 0 ||
825                         stats.port_inflight[wrk_enq] != 0) {
826                 printf("%d: Sched core didn't handle pkt as expected\n",
827                                 __LINE__);
828                 return -1;
829         }
830
831         cleanup(t);
832         return 0;
833 }
834
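/* xstats_tests
 * Exercises the xstats API end to end: checks the expected number of device
 * (8), port (21) and queue (16) statistics, verifies that an invalid queue id
 * is rejected, enqueues three packets and compares the resulting device, port
 * and queue values against hard-coded expectations, then confirms which
 * statistics are cleared by rte_event_dev_xstats_reset().
 */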
835 static int
836 xstats_tests(struct test *t)
837 {
838         const int wrk_enq = 2;
839         int err;
840
841         /* Create instance with 4 ports */
842         if (init(t, 1, 4) < 0 ||
843                         create_ports(t, 4) < 0 ||
844                         create_atomic_qids(t, 1) < 0) {
845                 printf("%d: Error initializing device\n", __LINE__);
846                 return -1;
847         }
848
849         /* CQ mapping to QID */
850         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
851         if (err != 1) {
852                 printf("%d: error mapping lb qid\n", __LINE__);
853                 cleanup(t);
854                 return -1;
855         }
856
857         if (rte_event_dev_start(evdev) < 0) {
858                 printf("%d: Error with start call\n", __LINE__);
859                 return -1;
860         }
861
862         const uint32_t XSTATS_MAX = 1024;
863
864         uint32_t i;
865         uint32_t ids[XSTATS_MAX];
866         uint64_t values[XSTATS_MAX];
867         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
868
869         for (i = 0; i < XSTATS_MAX; i++)
870                 ids[i] = i;
871
872         /* Device names / values */
873         int ret = rte_event_dev_xstats_names_get(evdev,
874                                         RTE_EVENT_DEV_XSTATS_DEVICE,
875                                         0, xstats_names, ids, XSTATS_MAX);
876         if (ret != 8) {
877                 printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
878                 return -1;
879         }
880         ret = rte_event_dev_xstats_get(evdev,
881                                         RTE_EVENT_DEV_XSTATS_DEVICE,
882                                         0, ids, values, ret);
883         if (ret != 8) {
884                 printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
885                 return -1;
886         }
887
888         /* Port names / values */
889         ret = rte_event_dev_xstats_names_get(evdev,
890                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
891                                         xstats_names, ids, XSTATS_MAX);
892         if (ret != 21) {
893                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
894                 return -1;
895         }
896         ret = rte_event_dev_xstats_get(evdev,
897                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
898                                         ids, values, ret);
899         if (ret != 21) {
900                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
901                 return -1;
902         }
903
904         /* Queue names / values */
905         ret = rte_event_dev_xstats_names_get(evdev,
906                                         RTE_EVENT_DEV_XSTATS_QUEUE,
907                                         0, xstats_names, ids, XSTATS_MAX);
908         if (ret != 16) {
909                 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
910                 return -1;
911         }
912
913         /* NEGATIVE TEST: with an invalid queue id passed, -EINVAL should be returned */
914         ret = rte_event_dev_xstats_get(evdev,
915                                         RTE_EVENT_DEV_XSTATS_QUEUE,
916                                         1, ids, values, ret);
917         if (ret != -EINVAL) {
918                 printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
919                 return -1;
920         }
921
922         ret = rte_event_dev_xstats_get(evdev,
923                                         RTE_EVENT_DEV_XSTATS_QUEUE,
924                                         0, ids, values, ret);
925         if (ret != 16) {
926                 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
927                 return -1;
928         }
929
930         /* enqueue packets to check values */
931         for (i = 0; i < 3; i++) {
932                 struct rte_event ev;
933                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
934                 if (!arp) {
935                         printf("%d: gen of pkt failed\n", __LINE__);
936                         return -1;
937                 }
938                 ev.queue_id = t->qid[i];
939                 ev.op = RTE_EVENT_OP_NEW;
940                 ev.mbuf = arp;
941                 ev.flow_id = 7;
942                 *rte_event_pmd_selftest_seqn(arp) = i;
943
944                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
945                 if (err != 1) {
946                         printf("%d: Failed to enqueue\n", __LINE__);
947                         return -1;
948                 }
949         }
950
951         rte_service_run_iter_on_app_lcore(t->service_id, 1);
952
953         /* Device names / values */
954         int num_stats = rte_event_dev_xstats_names_get(evdev,
955                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
956                                         xstats_names, ids, XSTATS_MAX);
957         if (num_stats < 0)
958                 goto fail;
959         ret = rte_event_dev_xstats_get(evdev,
960                                         RTE_EVENT_DEV_XSTATS_DEVICE,
961                                         0, ids, values, num_stats);
962         static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
963         for (i = 0; (signed int)i < ret; i++) {
964                 if (expected[i] != values[i]) {
965                         printf(
966                                 "%d Error xstat %d (id %d) %s : %"PRIu64
967                                 ", expect %"PRIu64"\n",
968                                 __LINE__, i, ids[i], xstats_names[i].name,
969                                 values[i], expected[i]);
970                         goto fail;
971                 }
972         }
973
974         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
975                                         0, NULL, 0);
976
977         /* ensure the reset statistics are zeroed */
978         static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
979         ret = rte_event_dev_xstats_get(evdev,
980                                         RTE_EVENT_DEV_XSTATS_DEVICE,
981                                         0, ids, values, num_stats);
982         for (i = 0; (signed int)i < ret; i++) {
983                 if (expected_zero[i] != values[i]) {
984                         printf(
985                                 "%d Error, xstat %d (id %d) %s : %"PRIu64
986                                 ", expect %"PRIu64"\n",
987                                 __LINE__, i, ids[i], xstats_names[i].name,
988                                 values[i], expected_zero[i]);
989                         goto fail;
990                 }
991         }
992
993         /* port reset checks */
994         num_stats = rte_event_dev_xstats_names_get(evdev,
995                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
996                                         xstats_names, ids, XSTATS_MAX);
997         if (num_stats < 0)
998                 goto fail;
999         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1000                                         0, ids, values, num_stats);
1001
1002         static const uint64_t port_expected[] = {
1003                 3 /* rx */,
1004                 0 /* tx */,
1005                 0 /* drop */,
1006                 0 /* inflights */,
1007                 0 /* avg pkt cycles */,
1008                 29 /* credits */,
1009                 0 /* rx ring used */,
1010                 4096 /* rx ring free */,
1011                 0 /* cq ring used */,
1012                 32 /* cq ring free */,
1013                 0 /* dequeue calls */,
1014                 /* 10 dequeue burst buckets */
1015                 0, 0, 0, 0, 0,
1016                 0, 0, 0, 0, 0,
1017         };
1018         if (ret != RTE_DIM(port_expected)) {
1019                 printf(
1020                         "%s %d: wrong number of port stats (%d), expected %zu\n",
1021                         __func__, __LINE__, ret, RTE_DIM(port_expected));
1022         }
1023
1024         for (i = 0; (signed int)i < ret; i++) {
1025                 if (port_expected[i] != values[i]) {
1026                         printf(
1027                                 "%s : %d: Error stat %s is %"PRIu64
1028                                 ", expected %"PRIu64"\n",
1029                                 __func__, __LINE__, xstats_names[i].name,
1030                                 values[i], port_expected[i]);
1031                         goto fail;
1032                 }
1033         }
1034
1035         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1036                                         0, NULL, 0);
1037
1038         /* ensure the reset statistics are zeroed */
1039         static const uint64_t port_expected_zero[] = {
1040                 0 /* rx */,
1041                 0 /* tx */,
1042                 0 /* drop */,
1043                 0 /* inflights */,
1044                 0 /* avg pkt cycles */,
1045                 29 /* credits */,
1046                 0 /* rx ring used */,
1047                 4096 /* rx ring free */,
1048                 0 /* cq ring used */,
1049                 32 /* cq ring free */,
1050                 0 /* dequeue calls */,
1051                 /* 10 dequeue burst buckets */
1052                 0, 0, 0, 0, 0,
1053                 0, 0, 0, 0, 0,
1054         };
1055         ret = rte_event_dev_xstats_get(evdev,
1056                                         RTE_EVENT_DEV_XSTATS_PORT,
1057                                         0, ids, values, num_stats);
1058         for (i = 0; (signed int)i < ret; i++) {
1059                 if (port_expected_zero[i] != values[i]) {
1060                         printf(
1061                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1062                                 ", expect %"PRIu64"\n",
1063                                 __LINE__, i, ids[i], xstats_names[i].name,
1064                                 values[i], port_expected_zero[i]);
1065                         goto fail;
1066                 }
1067         }
1068
1069         /* QUEUE STATS TESTS */
1070         num_stats = rte_event_dev_xstats_names_get(evdev,
1071                                                 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1072                                                 xstats_names, ids, XSTATS_MAX);
1073         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1074                                         0, ids, values, num_stats);
1075         if (ret < 0) {
1076                 printf("xstats get returned %d\n", ret);
1077                 goto fail;
1078         }
1079         if ((unsigned int)ret > XSTATS_MAX)
1080                 printf("%s %d: more xstats available than space\n",
1081                                 __func__, __LINE__);
1082
1083         static const uint64_t queue_expected[] = {
1084                 3 /* rx */,
1085                 3 /* tx */,
1086                 0 /* drop */,
1087                 3 /* inflights */,
1088                 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1089                 /* QID-to-Port: pinned_flows, packets */
1090                 0, 0,
1091                 0, 0,
1092                 1, 3,
1093                 0, 0,
1094         };
1095         for (i = 0; (signed int)i < ret; i++) {
1096                 if (queue_expected[i] != values[i]) {
1097                         printf(
1098                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1099                                 ", expect %"PRIu64"\n",
1100                                 __LINE__, i, ids[i], xstats_names[i].name,
1101                                 values[i], queue_expected[i]);
1102                         goto fail;
1103                 }
1104         }
1105
1106         /* Reset the queue stats here */
1107         ret = rte_event_dev_xstats_reset(evdev,
1108                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1109                                         NULL,
1110                                         0);
1111
1112         /* Verify that the resettable stats are reset, and others are not */
1113         static const uint64_t queue_expected_zero[] = {
1114                 0 /* rx */,
1115                 0 /* tx */,
1116                 0 /* drop */,
1117                 3 /* inflight */,
1118                 0, 0, 0, 0, /* 4 iq used */
1119                 /* QID-to-Port: pinned_flows, packets */
1120                 0, 0,
1121                 0, 0,
1122                 1, 0,
1123                 0, 0,
1124         };
1125
1126         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1127                                         ids, values, num_stats);
1128         int fails = 0;
1129         for (i = 0; (signed int)i < ret; i++) {
1130                 if (queue_expected_zero[i] != values[i]) {
1131                         printf(
1132                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1133                                 ", expect %"PRIu64"\n",
1134                                 __LINE__, i, ids[i], xstats_names[i].name,
1135                                 values[i], queue_expected_zero[i]);
1136                         fails++;
1137                 }
1138         }
1139         if (fails) {
1140                 printf("%d : %d of values were not as expected above\n",
1141                                 __LINE__, fails);
1142                 goto fail;
1143         }
1144
1145         cleanup(t);
1146         return 0;
1147
1148 fail:
1149         rte_event_dev_dump(0, stdout);
1150         cleanup(t);
1151         return -1;
1152 }
1153
1154
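/* xstats_id_abuse_tests
 * Passes deliberately out-of-range port and queue numbers (UINT8_MAX-1) to
 * rte_event_dev_xstats_names_get() and expects zero statistics back. The
 * device-level variant is not tested as it ignores the port/queue argument.
 */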
1155 static int
1156 xstats_id_abuse_tests(struct test *t)
1157 {
1158         int err;
1159         const uint32_t XSTATS_MAX = 1024;
1160         const uint32_t link_port = 2;
1161
1162         uint32_t ids[XSTATS_MAX];
1163         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1164
1165         /* Create instance with 4 ports */
1166         if (init(t, 1, 4) < 0 ||
1167                         create_ports(t, 4) < 0 ||
1168                         create_atomic_qids(t, 1) < 0) {
1169                 printf("%d: Error initializing device\n", __LINE__);
1170                 goto fail;
1171         }
1172
1173         err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1174         if (err != 1) {
1175                 printf("%d: error mapping lb qid\n", __LINE__);
1176                 goto fail;
1177         }
1178
1179         if (rte_event_dev_start(evdev) < 0) {
1180                 printf("%d: Error with start call\n", __LINE__);
1181                 goto fail;
1182         }
1183
1184         /* no test for device, as it ignores the port/q number */
1185         int num_stats = rte_event_dev_xstats_names_get(evdev,
1186                                         RTE_EVENT_DEV_XSTATS_PORT,
1187                                         UINT8_MAX-1, xstats_names, ids,
1188                                         XSTATS_MAX);
1189         if (num_stats != 0) {
1190                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1191                                 0, num_stats);
1192                 goto fail;
1193         }
1194
1195         num_stats = rte_event_dev_xstats_names_get(evdev,
1196                                         RTE_EVENT_DEV_XSTATS_QUEUE,
1197                                         UINT8_MAX-1, xstats_names, ids,
1198                                         XSTATS_MAX);
1199         if (num_stats != 0) {
1200                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1201                                 0, num_stats);
1202                 goto fail;
1203         }
1204
1205         cleanup(t);
1206         return 0;
1207 fail:
1208         cleanup(t);
1209         return -1;
1210 }
1211
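/* port_reconfig_credits
 * Reconfigures queue 0 and port 0 for 32 iterations, each time re-linking,
 * starting the device, pushing one packet through and stopping again. If the
 * reconfiguration path leaked credits, later iterations would fail to
 * enqueue or dequeue the packet.
 */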
1212 static int
1213 port_reconfig_credits(struct test *t)
1214 {
1215         if (init(t, 1, 1) < 0) {
1216                 printf("%d: Error initializing device\n", __LINE__);
1217                 return -1;
1218         }
1219
1220         uint32_t i;
1221         const uint32_t NUM_ITERS = 32;
1222         for (i = 0; i < NUM_ITERS; i++) {
1223                 const struct rte_event_queue_conf conf = {
1224                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1225                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1226                         .nb_atomic_flows = 1024,
1227                         .nb_atomic_order_sequences = 1024,
1228                 };
1229                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1230                         printf("%d: error creating qid\n", __LINE__);
1231                         return -1;
1232                 }
1233                 t->qid[0] = 0;
1234
1235                 static const struct rte_event_port_conf port_conf = {
1236                                 .new_event_threshold = 128,
1237                                 .dequeue_depth = 32,
1238                                 .enqueue_depth = 64,
1239                 };
1240                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1241                         printf("%d Error setting up port\n", __LINE__);
1242                         return -1;
1243                 }
1244
1245                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1246                 if (links != 1) {
1247                         printf("%d: error mapping lb qid\n", __LINE__);
1248                         goto fail;
1249                 }
1250
1251                 if (rte_event_dev_start(evdev) < 0) {
1252                         printf("%d: Error with start call\n", __LINE__);
1253                         goto fail;
1254                 }
1255
1256                 const uint32_t NPKTS = 1;
1257                 uint32_t j;
1258                 for (j = 0; j < NPKTS; j++) {
1259                         struct rte_event ev;
1260                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1261                         if (!arp) {
1262                                 printf("%d: gen of pkt failed\n", __LINE__);
1263                                 goto fail;
1264                         }
1265                         ev.queue_id = t->qid[0];
1266                         ev.op = RTE_EVENT_OP_NEW;
1267                         ev.mbuf = arp;
1268                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1269                         if (err != 1) {
1270                                 printf("%d: Failed to enqueue\n", __LINE__);
1271                                 rte_event_dev_dump(0, stdout);
1272                                 goto fail;
1273                         }
1274                 }
1275
1276                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1277
1278                 struct rte_event ev[NPKTS];
1279                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1280                                                         NPKTS, 0);
1281                 if (deq != 1)
1282                         printf("%d error; no packet dequeued\n", __LINE__);
1283
1284                 /* let cleanup below stop the device on last iter */
1285                 if (i != NUM_ITERS-1)
1286                         rte_event_dev_stop(evdev);
1287         }
1288
1289         cleanup(t);
1290         return 0;
1291 fail:
1292         cleanup(t);
1293         return -1;
1294 }
1295
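/* port_single_lb_reconfig
 * Sets up one atomic load-balanced queue and one single-link queue, links
 * port 0 to the LB queue, unlinks it and re-links it to the single-link
 * queue (with port 1 taking over the LB queue), then verifies the device
 * still starts.
 */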
1296 static int
1297 port_single_lb_reconfig(struct test *t)
1298 {
1299         if (init(t, 2, 2) < 0) {
1300                 printf("%d: Error initializing device\n", __LINE__);
1301                 goto fail;
1302         }
1303
1304         static const struct rte_event_queue_conf conf_lb_atomic = {
1305                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1306                 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1307                 .nb_atomic_flows = 1024,
1308                 .nb_atomic_order_sequences = 1024,
1309         };
1310         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1311                 printf("%d: error creating qid\n", __LINE__);
1312                 goto fail;
1313         }
1314
1315         static const struct rte_event_queue_conf conf_single_link = {
1316                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1317                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1318         };
1319         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1320                 printf("%d: error creating qid\n", __LINE__);
1321                 goto fail;
1322         }
1323
1324         struct rte_event_port_conf port_conf = {
1325                 .new_event_threshold = 128,
1326                 .dequeue_depth = 32,
1327                 .enqueue_depth = 64,
1328         };
1329         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1330                 printf("%d Error setting up port\n", __LINE__);
1331                 goto fail;
1332         }
1333         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1334                 printf("%d Error setting up port\n", __LINE__);
1335                 goto fail;
1336         }
1337
1338         /* link port to lb queue */
1339         uint8_t queue_id = 0;
1340         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1341                 printf("%d: error creating link for qid\n", __LINE__);
1342                 goto fail;
1343         }
1344
1345         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1346         if (ret != 1) {
1347                 printf("%d: Error unlinking lb port\n", __LINE__);
1348                 goto fail;
1349         }
1350
1351         queue_id = 1;
1352         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1353                 printf("%d: error creating link for qid\n", __LINE__);
1354                 goto fail;
1355         }
1356
1357         queue_id = 0;
1358         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1359         if (err != 1) {
1360                 printf("%d: error mapping lb qid\n", __LINE__);
1361                 goto fail;
1362         }
1363
1364         if (rte_event_dev_start(evdev) < 0) {
1365                 printf("%d: Error with start call\n", __LINE__);
1366                 goto fail;
1367         }
1368
1369         cleanup(t);
1370         return 0;
1371 fail:
1372         cleanup(t);
1373         return -1;
1374 }
1375
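/* xstats_brute_force
 * Requests xstats names and values for every mode (device, port, queue) and
 * every id from 0 to UINT8_MAX-1. Only checks that none of the calls crash;
 * return values are deliberately ignored.
 */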
1376 static int
1377 xstats_brute_force(struct test *t)
1378 {
1379         uint32_t i;
1380         const uint32_t XSTATS_MAX = 1024;
1381         uint32_t ids[XSTATS_MAX];
1382         uint64_t values[XSTATS_MAX];
1383         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1384
1385
1386         /* Create instance with 4 ports */
1387         if (init(t, 1, 4) < 0 ||
1388                         create_ports(t, 4) < 0 ||
1389                         create_atomic_qids(t, 1) < 0) {
1390                 printf("%d: Error initializing device\n", __LINE__);
1391                 return -1;
1392         }
1393
1394         int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1395         if (err != 1) {
1396                 printf("%d: error mapping lb qid\n", __LINE__);
1397                 goto fail;
1398         }
1399
1400         if (rte_event_dev_start(evdev) < 0) {
1401                 printf("%d: Error with start call\n", __LINE__);
1402                 goto fail;
1403         }
1404
1405         for (i = 0; i < XSTATS_MAX; i++)
1406                 ids[i] = i;
1407
1408         for (i = 0; i < 3; i++) {
1409                 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1410                 uint32_t j;
1411                 for (j = 0; j < UINT8_MAX; j++) {
1412                         rte_event_dev_xstats_names_get(evdev, mode,
1413                                 j, xstats_names, ids, XSTATS_MAX);
1414
1415                         rte_event_dev_xstats_get(evdev, mode, j, ids,
1416                                                  values, XSTATS_MAX);
1417                 }
1418         }
1419
1420         cleanup(t);
1421         return 0;
1422 fail:
1423         cleanup(t);
1424         return -1;
1425 }
1426
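/* xstats_id_reset_tests
 * Enqueues NPKTS (7) packets, then walks the device-level xstats by name,
 * checking each value against the expected totals, resetting each one
 * individually via its id and reading the value back afterwards.
 */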
1427 static int
1428 xstats_id_reset_tests(struct test *t)
1429 {
1430         const int wrk_enq = 2;
1431         int err;
1432
1433         /* Create instance with 4 ports */
1434         if (init(t, 1, 4) < 0 ||
1435                         create_ports(t, 4) < 0 ||
1436                         create_atomic_qids(t, 1) < 0) {
1437                 printf("%d: Error initializing device\n", __LINE__);
1438                 return -1;
1439         }
1440
1441         /* CQ mapping to QID */
1442         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1443         if (err != 1) {
1444                 printf("%d: error mapping lb qid\n", __LINE__);
1445                 goto fail;
1446         }
1447
1448         if (rte_event_dev_start(evdev) < 0) {
1449                 printf("%d: Error with start call\n", __LINE__);
1450                 goto fail;
1451         }
1452
1453 #define XSTATS_MAX 1024
1454         int ret;
1455         uint32_t i;
1456         uint32_t ids[XSTATS_MAX];
1457         uint64_t values[XSTATS_MAX];
1458         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1459
1460         for (i = 0; i < XSTATS_MAX; i++)
1461                 ids[i] = i;
1462
1463 #define NUM_DEV_STATS 8
1464         /* Device names / values */
1465         int num_stats = rte_event_dev_xstats_names_get(evdev,
1466                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1467                                         0, xstats_names, ids, XSTATS_MAX);
1468         if (num_stats != NUM_DEV_STATS) {
1469                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1470                                 NUM_DEV_STATS, num_stats);
1471                 goto fail;
1472         }
1473         ret = rte_event_dev_xstats_get(evdev,
1474                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1475                                         0, ids, values, num_stats);
1476         if (ret != NUM_DEV_STATS) {
1477                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1478                                 NUM_DEV_STATS, ret);
1479                 goto fail;
1480         }
1481
1482 #define NPKTS 7
1483         for (i = 0; i < NPKTS; i++) {
1484                 struct rte_event ev;
1485                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1486                 if (!arp) {
1487                         printf("%d: gen of pkt failed\n", __LINE__);
1488                         goto fail;
1489                 }
1490                 ev.queue_id = t->qid[0];
1491                 ev.op = RTE_EVENT_OP_NEW;
1492                 ev.mbuf = arp;
1493                 *rte_event_pmd_selftest_seqn(arp) = i;
1494
1495                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1496                 if (err != 1) {
1497                         printf("%d: Failed to enqueue\n", __LINE__);
1498                         goto fail;
1499                 }
1500         }
1501
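        /*
         * The sw PMD schedules from a service rather than in the enqueue
         * path; the selftest has no dedicated service lcore, so drive one
         * scheduling iteration explicitly on this application lcore.
         */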
1502         rte_service_run_iter_on_app_lcore(t->service_id, 1);
1503
1504         static const char * const dev_names[] = {
1505                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1506                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1507                 "dev_sched_last_iter_bitmask",
1508                 "dev_sched_progress_last_iter"
1509         };
1510         uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
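        /*
         * Expected device stats after the single schedule iteration above:
         * all NPKTS events received and transmitted, one sched call that made
         * progress, and (presumably) a last-iteration bitmask of 4 = 1 << 2,
         * since the only linked worker port is port 2 (wrk_enq).
         */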
1511         for (i = 0; (int)i < ret; i++) {
1512                 unsigned int id;
1513                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1514                                                                 dev_names[i],
1515                                                                 &id);
1516                 if (id != i) {
1517                         printf("%d: %s id incorrect, expected %d got %d\n",
1518                                         __LINE__, dev_names[i], i, id);
1519                         goto fail;
1520                 }
1521                 if (val != dev_expected[i]) {
1522                         printf("%d: %s value incorrect, expected %"
1523                                 PRIu64" got %"PRIu64"\n", __LINE__,
1524                                 dev_names[i], dev_expected[i], val);
1525                         goto fail;
1526                 }
1527                 /* reset to zero */
1528                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1529                                                 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1530                                                 &id,
1531                                                 1);
1532                 if (reset_ret) {
1533                         printf("%d: failed to reset successfully\n", __LINE__);
1534                         goto fail;
1535                 }
1536                 dev_expected[i] = 0;
1537                 /* check value again */
1538                 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1539                 if (val != dev_expected[i]) {
1540                         printf("%d: %s value incorrect, expected %"PRIu64
1541                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1542                                 dev_expected[i], val);
1543                         goto fail;
1544                 }
1545         }
1546
1547 /* PORT_OFF (50) is the port stat offset from the start of the device's
1548  * whole xstats. This WILL break every time we add a statistic to a port
1549  * or the device, but there is no other way to test
1550  */
1551 #define PORT_OFF 50
1552 /* num stats for the tested port. CQ size adds more stats to a port */
1553 #define NUM_PORT_STATS 21
1554 /* the port to test. */
1555 #define PORT 2
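/* Port 2 is the only port linked to the QID (wrk_enq above), so the tx,
 * inflight and CQ occupancy numbers are expected to land on this port.
 */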
1556         num_stats = rte_event_dev_xstats_names_get(evdev,
1557                                         RTE_EVENT_DEV_XSTATS_PORT, PORT,
1558                                         xstats_names, ids, XSTATS_MAX);
1559         if (num_stats != NUM_PORT_STATS) {
1560                 printf("%d: expected %d stats, got return %d\n",
1561                         __LINE__, NUM_PORT_STATS, num_stats);
1562                 goto fail;
1563         }
1564         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1565                                         ids, values, num_stats);
1566
1567         if (ret != NUM_PORT_STATS) {
1568                 printf("%d: expected %d stats, got return %d\n",
1569                                 __LINE__, NUM_PORT_STATS, ret);
1570                 goto fail;
1571         }
1572         static const char * const port_names[] = {
1573                 "port_2_rx",
1574                 "port_2_tx",
1575                 "port_2_drop",
1576                 "port_2_inflight",
1577                 "port_2_avg_pkt_cycles",
1578                 "port_2_credits",
1579                 "port_2_rx_ring_used",
1580                 "port_2_rx_ring_free",
1581                 "port_2_cq_ring_used",
1582                 "port_2_cq_ring_free",
1583                 "port_2_dequeue_calls",
1584                 "port_2_dequeues_returning_0",
1585                 "port_2_dequeues_returning_1-4",
1586                 "port_2_dequeues_returning_5-8",
1587                 "port_2_dequeues_returning_9-12",
1588                 "port_2_dequeues_returning_13-16",
1589                 "port_2_dequeues_returning_17-20",
1590                 "port_2_dequeues_returning_21-24",
1591                 "port_2_dequeues_returning_25-28",
1592                 "port_2_dequeues_returning_29-32",
1593                 "port_2_dequeues_returning_33-36",
1594         };
1595         uint64_t port_expected[] = {
1596                 0, /* rx */
1597                 NPKTS, /* tx */
1598                 0, /* drop */
1599                 NPKTS, /* inflight */
1600                 0, /* avg pkt cycles */
1601                 0, /* credits */
1602                 0, /* rx ring used */
1603                 4096, /* rx ring free */
1604                 NPKTS,  /* cq ring used */
1605                 25, /* cq ring free */
1606                 0, /* dequeue zero calls */
1607                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1608                 0, 0, 0, 0, 0,
1609         };
1610         uint64_t port_expected_zero[] = {
1611                 0, /* rx */
1612                 0, /* tx */
1613                 0, /* drop */
1614                 NPKTS, /* inflight */
1615                 0, /* avg pkt cycles */
1616                 0, /* credits */
1617                 0, /* rx ring used */
1618                 4096, /* rx ring free */
1619                 NPKTS,  /* cq ring used */
1620                 25, /* cq ring free */
1621                 0, /* dequeue zero calls */
1622                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1623                 0, 0, 0, 0, 0,
1624         };
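        /*
         * Note the difference between port_expected and port_expected_zero:
         * the per-id reset below only clears counters such as rx/tx, while
         * inflight and the ring used/free figures are point-in-time levels
         * and therefore still read NPKTS/4096/25 after the reset.
         */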
1625         if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
                        RTE_DIM(port_expected_zero) != NUM_PORT_STATS ||
1626                         RTE_DIM(port_names) != NUM_PORT_STATS) {
1627                 printf("%d: port array of wrong size\n", __LINE__);
1628                 goto fail;
1629         }
1630
1631         int failed = 0;
1632         for (i = 0; (int)i < ret; i++) {
1633                 unsigned int id;
1634                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1635                                                                 port_names[i],
1636                                                                 &id);
1637                 if (id != i + PORT_OFF) {
1638                         printf("%d: %s id incorrect, expected %d got %d\n",
1639                                         __LINE__, port_names[i], i+PORT_OFF,
1640                                         id);
1641                         failed = 1;
1642                 }
1643                 if (val != port_expected[i]) {
1644                         printf("%d: %s value incorrect, expected %"PRIu64
1645                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1646                                 port_expected[i], val);
1647                         failed = 1;
1648                 }
1649                 /* reset to zero */
1650                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1651                                                 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1652                                                 &id,
1653                                                 1);
1654                 if (reset_ret) {
1655                         printf("%d: failed to reset successfully\n", __LINE__);
1656                         failed = 1;
1657                 }
1658                 /* check value again */
1659                 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1660                 if (val != port_expected_zero[i]) {
1661                         printf("%d: %s value incorrect, expected %"PRIu64
1662                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1663                                 port_expected_zero[i], val);
1664                         failed = 1;
1665                 }
1666         }
1667         if (failed)
1668                 goto fail;
1669
1670 /* num queue stats */
1671 #define NUM_Q_STATS 16
1672 /* queue stat offset from the start of the device's whole xstats.
1673  * This will break every time we add a statistic to a device/port/queue
1674  */
1675 #define QUEUE_OFF 92
1676         const uint32_t queue = 0;
1677         num_stats = rte_event_dev_xstats_names_get(evdev,
1678                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1679                                         xstats_names, ids, XSTATS_MAX);
1680         if (num_stats != NUM_Q_STATS) {
1681                 printf("%d: expected %d stats, got return %d\n",
1682                         __LINE__, NUM_Q_STATS, num_stats);
1683                 goto fail;
1684         }
1685         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1686                                         queue, ids, values, num_stats);
1687         if (ret != NUM_Q_STATS) {
1688                 printf("%d: expected %d stats, got return %d\n", __LINE__, NUM_Q_STATS, ret);
1689                 goto fail;
1690         }
1691         static const char * const queue_names[] = {
1692                 "qid_0_rx",
1693                 "qid_0_tx",
1694                 "qid_0_drop",
1695                 "qid_0_inflight",
1696                 "qid_0_iq_0_used",
1697                 "qid_0_iq_1_used",
1698                 "qid_0_iq_2_used",
1699                 "qid_0_iq_3_used",
1700                 "qid_0_port_0_pinned_flows",
1701                 "qid_0_port_0_packets",
1702                 "qid_0_port_1_pinned_flows",
1703                 "qid_0_port_1_packets",
1704                 "qid_0_port_2_pinned_flows",
1705                 "qid_0_port_2_packets",
1706                 "qid_0_port_3_pinned_flows",
1707                 "qid_0_port_3_packets",
1708         };
1709         uint64_t queue_expected[] = {
1710                 7, /* rx */
1711                 7, /* tx */
1712                 0, /* drop */
1713                 7, /* inflight */
1714                 0, /* iq 0 used */
1715                 0, /* iq 1 used */
1716                 0, /* iq 2 used */
1717                 0, /* iq 3 used */
1718                 /* QID-to-Port: pinned_flows, packets */
1719                 0, 0,
1720                 0, 0,
1721                 1, 7,
1722                 0, 0,
1723         };
1724         uint64_t queue_expected_zero[] = {
1725                 0, /* rx */
1726                 0, /* tx */
1727                 0, /* drop */
1728                 7, /* inflight */
1729                 0, /* iq 0 used */
1730                 0, /* iq 1 used */
1731                 0, /* iq 2 used */
1732                 0, /* iq 3 used */
1733                 /* QID-to-Port: pinned_flows, packets */
1734                 0, 0,
1735                 0, 0,
1736                 1, 0,
1737                 0, 0,
1738         };
1739         if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1740                         RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1741                         RTE_DIM(queue_names) != NUM_Q_STATS) {
1742                 printf("%d: queue array of wrong size\n", __LINE__);
1743                 goto fail;
1744         }
1745
1746         failed = 0;
1747         for (i = 0; (int)i < ret; i++) {
1748                 unsigned int id;
1749                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1750                                                                 queue_names[i],
1751                                                                 &id);
1752                 if (id != i + QUEUE_OFF) {
1753                         printf("%d: %s id incorrect, expected %d got %d\n",
1754                                         __LINE__, queue_names[i], i+QUEUE_OFF,
1755                                         id);
1756                         failed = 1;
1757                 }
1758                 if (val != queue_expected[i]) {
1759                         printf("%d: %s value incorrect, expected %"PRIu64
1760                                 " got %"PRIu64"\n", __LINE__,
1761                                 queue_names[i], queue_expected[i], val);
1762                         failed = 1;
1763                 }
1764                 /* reset to zero */
1765                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1766                                                 RTE_EVENT_DEV_XSTATS_QUEUE,
1767                                                 queue, &id, 1);
1768                 if (reset_ret) {
1769                         printf("%d: failed to reset successfully\n", __LINE__);
1770                         failed = 1;
1771                 }
1772                 /* check value again */
1773                 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1774                                                         0);
1775                 if (val != queue_expected_zero[i]) {
1776                         printf("%d: %s value incorrect, expected %"PRIu64
1777                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1778                                 queue_expected_zero[i], val);
1779                         failed = 1;
1780                 }
1781         }
1782
1783         if (failed)
1784                 goto fail;
1785
1786         cleanup(t);
1787         return 0;
1788 fail:
1789         cleanup(t);
1790         return -1;
1791 }
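
/*
 * Illustrative sketch, not wired into the selftest list above: the
 * lookup-then-reset pattern the reset tests exercise, i.e. resolve a single
 * xstat id by name (e.g. "dev_rx") and reset only that statistic. Error
 * handling of the by-name lookup is elided for brevity.
 */
static __rte_unused int
xstats_single_reset_sketch(uint8_t dev_id, const char *stat_name)
{
        unsigned int id;
        uint64_t val;

        /* fetch the current value and, more importantly, the stat's id */
        val = rte_event_dev_xstats_by_name_get(dev_id, stat_name, &id);
        RTE_SET_USED(val);

        /* reset only this one device-level statistic */
        return rte_event_dev_xstats_reset(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
                        0, &id, 1);
}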
1792
1793 static int
1794 ordered_reconfigure(struct test *t)
1795 {
1796         if (init(t, 1, 1) < 0 ||
1797                         create_ports(t, 1) < 0) {
1798                 printf("%d: Error initializing device\n", __LINE__);
1799                 return -1;
1800         }
1801
1802         const struct rte_event_queue_conf conf = {
1803                         .schedule_type = RTE_SCHED_TYPE_ORDERED,
1804                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1805                         .nb_atomic_flows = 1024,
1806                         .nb_atomic_order_sequences = 1024,
1807         };
1808
1809         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1810                 printf("%d: error creating qid\n", __LINE__);
1811                 goto failed;
1812         }
1813
1814         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1815                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1816                 goto failed;
1817         }
1818
1819         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1820         if (rte_event_dev_start(evdev) < 0) {
1821                 printf("%d: Error with start call\n", __LINE__);
1822                 return -1;
1823         }
1824
1825         cleanup(t);
1826         return 0;
1827 failed:
1828         cleanup(t);
1829         return -1;
1830 }
1831
1832 static int
1833 qid_priorities(struct test *t)
1834 {
1835         /* The test works by having a CQ with enough empty space for all
1836          * packets, and enqueueing 3 packets to 3 QIDs. They must be returned
1837          * in order of QID priority, not ingress order, for the test to pass.
1838          */
1839         unsigned int i;
1840         /* Create instance with 1 port and 3 QIDs */
1841         if (init(t, 3, 1) < 0 ||
1842                         create_ports(t, 1) < 0) {
1843                 printf("%d: Error initializing device\n", __LINE__);
1844                 return -1;
1845         }
1846
1847         for (i = 0; i < 3; i++) {
1848                 /* Create QID */
1849                 const struct rte_event_queue_conf conf = {
1850                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1851                         /* increase priority (0 == highest), as we go */
1852                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1853                         .nb_atomic_flows = 1024,
1854                         .nb_atomic_order_sequences = 1024,
1855                 };
1856
1857                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1858                         printf("%d: error creating qid %d\n", __LINE__, i);
1859                         return -1;
1860                 }
1861                 t->qid[i] = i;
1862         }
1863         t->nb_qids = i;
1864         /* map all QIDs to port */
1865         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1866
1867         if (rte_event_dev_start(evdev) < 0) {
1868                 printf("%d: Error with start call\n", __LINE__);
1869                 return -1;
1870         }
1871
1872         /* enqueue 3 packets, setting seqn and QID to check priority */
1873         for (i = 0; i < 3; i++) {
1874                 struct rte_event ev;
1875                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1876                 if (!arp) {
1877                         printf("%d: gen of pkt failed\n", __LINE__);
1878                         return -1;
1879                 }
1880                 ev.queue_id = t->qid[i];
1881                 ev.op = RTE_EVENT_OP_NEW;
1882                 ev.mbuf = arp;
1883                 *rte_event_pmd_selftest_seqn(arp) = i;
1884
1885                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1886                 if (err != 1) {
1887                         printf("%d: Failed to enqueue\n", __LINE__);
1888                         return -1;
1889                 }
1890         }
1891
1892         rte_service_run_iter_on_app_lcore(t->service_id, 1);
1893
1894         /* dequeue packets, verify priority was upheld */
1895         struct rte_event ev[32];
1896         uint32_t deq_pkts =
1897                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1898         if (deq_pkts != 3) {
1899                 printf("%d: failed to deq packets\n", __LINE__);
1900                 rte_event_dev_dump(evdev, stdout);
1901                 return -1;
1902         }
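        /*
         * QID 2 was created with the numerically lowest priority value, i.e.
         * the highest priority (0 == highest), so its event must come out
         * first: the expected seqn order is 2, 1, 0, hence the 2-i check.
         */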
1903         for (i = 0; i < 3; i++) {
1904                 if (*rte_event_pmd_selftest_seqn(ev[i].mbuf) != 2-i) {
1905                         printf(
1906                                 "%d: qid priority test: seqn %d incorrectly prioritized\n",
1907                                         __LINE__, i);
                        return -1;
1908                 }
1909         }
1910
1911         cleanup(t);
1912         return 0;
1913 }
1914
1915 static int
1916 unlink_in_progress(struct test *t)
1917 {
1918         /* Test the unlinking API, in particular that while an unlink request
1919          * has not yet been seen by the scheduler thread, the
1920          * unlinks_in_progress() function returns the number of outstanding unlinks.
1921          */
1922         unsigned int i;
1923         /* Create instance with 1 port and 3 QIDs */
1924         if (init(t, 3, 1) < 0 ||
1925                         create_ports(t, 1) < 0) {
1926                 printf("%d: Error initializing device\n", __LINE__);
1927                 return -1;
1928         }
1929
1930         for (i = 0; i < 3; i++) {
1931                 /* Create QID */
1932                 const struct rte_event_queue_conf conf = {
1933                         .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1934                         /* increase priority (0 == highest), as we go */
1935                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1936                         .nb_atomic_flows = 1024,
1937                         .nb_atomic_order_sequences = 1024,
1938                 };
1939
1940                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1941                         printf("%d: error creating qid %d\n", __LINE__, i);
1942                         return -1;
1943                 }
1944                 t->qid[i] = i;
1945         }
1946         t->nb_qids = i;
1947         /* map all QIDs to port */
1948         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1949
1950         if (rte_event_dev_start(evdev) < 0) {
1951                 printf("%d: Error with start call\n", __LINE__);
1952                 return -1;
1953         }
1954
1955         /* unlink all ports to have outstanding unlink requests */
1956         int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
1957         if (ret < 0) {
1958                 printf("%d: Failed to unlink queues\n", __LINE__);
1959                 return -1;
1960         }
1961
1962         /* get active unlinks here, expect 3 */
1963         int unlinks_in_progress =
1964                 rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1965         if (unlinks_in_progress != 3) {
1966                 printf("%d: Expected num unlinks in progress == 3, got %d\n",
1967                                 __LINE__, unlinks_in_progress);
1968                 return -1;
1969         }
1970
1971         /* run scheduler service on this thread to ack the unlinks */
1972         rte_service_run_iter_on_app_lcore(t->service_id, 1);
1973
1974         /* active unlinks expected to be 0 now that the scheduler thread has acked */
1975         unlinks_in_progress =
1976                 rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1977         if (unlinks_in_progress != 0) {
1978                 printf("%d: Expected num unlinks in progress == 0, got %d\n",
1979                                 __LINE__, unlinks_in_progress);
                return -1;
1980         }
1981
1982         cleanup(t);
1983         return 0;
1984 }
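
/*
 * Illustrative sketch, not part of the selftest list: the unlink quiescence
 * pattern that unlink_in_progress() above verifies, seen from the application
 * side - request the unlink, then keep the scheduler service running until
 * unlinks_in_progress() reports that all unlinks have been acknowledged. The
 * service_id is assumed to come from rte_event_dev_service_id_get(), as the
 * tests do for t->service_id.
 */
static __rte_unused void
wait_for_unlinks_sketch(uint8_t dev_id, uint8_t port_id, uint32_t service_id)
{
        /* NULL + 0 requests an unlink of all queues mapped to this port */
        rte_event_port_unlink(dev_id, port_id, NULL, 0);

        while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
                rte_service_run_iter_on_app_lcore(service_id, 1);
}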
1985
1986 static int
1987 load_balancing(struct test *t)
1988 {
1989         const int rx_enq = 0;
1990         int err;
1991         uint32_t i;
1992
1993         if (init(t, 1, 4) < 0 ||
1994                         create_ports(t, 4) < 0 ||
1995                         create_atomic_qids(t, 1) < 0) {
1996                 printf("%d: Error initializing device\n", __LINE__);
1997                 return -1;
1998         }
1999
2000         for (i = 0; i < 3; i++) {
2001                 /* map port 1 - 3 inclusive */
2002                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
2003                                 NULL, 1) != 1) {
2004                         printf("%d: error mapping qid to port %d\n",
2005                                         __LINE__, i);
2006                         return -1;
2007                 }
2008         }
2009
2010         if (rte_event_dev_start(evdev) < 0) {
2011                 printf("%d: Error with start call\n", __LINE__);
2012                 return -1;
2013         }
2014
2015         /************** FORWARD ****************/
2016         /*
2017          * Create a set of flows that test the load-balancing operation of the
2018          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
2019          * with a new flow, which should be sent to the 3rd mapped CQ
2020          */
2021         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
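        /*
         * With atomic scheduling each flow stays pinned to the CQ it first
         * lands on, so flow 0 (4 events), flow 1 (2 events) and flow 2
         * (3 events) map onto the three linked worker ports; the inflight
         * checks below (4, 2, 3) depend on that distribution.
         */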
2022
2023         for (i = 0; i < RTE_DIM(flows); i++) {
2024                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2025                 if (!arp) {
2026                         printf("%d: gen of pkt failed\n", __LINE__);
2027                         return -1;
2028                 }
2029
2030                 struct rte_event ev = {
2031                                 .op = RTE_EVENT_OP_NEW,
2032                                 .queue_id = t->qid[0],
2033                                 .flow_id = flows[i],
2034                                 .mbuf = arp,
2035                 };
2036                 /* generate pkt and enqueue */
2037                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2038                 if (err != 1) {
2039                         printf("%d: Failed to enqueue\n", __LINE__);
2040                         return -1;
2041                 }
2042         }
2043
2044         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2045
2046         struct test_event_dev_stats stats;
2047         err = test_event_dev_stats_get(evdev, &stats);
2048         if (err) {
2049                 printf("%d: failed to get stats\n", __LINE__);
2050                 return -1;
2051         }
2052
2053         if (stats.port_inflight[1] != 4) {
2054                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2055                                 __func__);
2056                 return -1;
2057         }
2058         if (stats.port_inflight[2] != 2) {
2059                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2060                                 __func__);
2061                 return -1;
2062         }
2063         if (stats.port_inflight[3] != 3) {
2064                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2065                                 __func__);
2066                 return -1;
2067         }
2068
2069         cleanup(t);
2070         return 0;
2071 }
2072
2073 static int
2074 load_balancing_history(struct test *t)
2075 {
2076         struct test_event_dev_stats stats = {0};
2077         const int rx_enq = 0;
2078         int err;
2079         uint32_t i;
2080
2081         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2082         if (init(t, 1, 4) < 0 ||
2083                         create_ports(t, 4) < 0 ||
2084                         create_atomic_qids(t, 1) < 0)
2085                 return -1;
2086
2087         /* CQ mapping to QID */
2088         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2089                 printf("%d: error mapping port 1 qid\n", __LINE__);
2090                 return -1;
2091         }
2092         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2093                 printf("%d: error mapping port 2 qid\n", __LINE__);
2094                 return -1;
2095         }
2096         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2097                 printf("%d: error mapping port 3 qid\n", __LINE__);
2098                 return -1;
2099         }
2100         if (rte_event_dev_start(evdev) < 0) {
2101                 printf("%d: Error with start call\n", __LINE__);
2102                 return -1;
2103         }
2104
2105         /*
2106          * Create a set of flows that test the load-balancing operation of the
2107          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2108          * the packet from CQ 0, send in a new set of flows. Ensure that:
2109          *  1. The new flow 3 gets into the empty CQ0
2110          *  2. Packets for the existing flow 1 get added into CQ1
2111          *  3. The next flow 0 pkt goes to CQ2, since CQ0 and CQ1 now contain
2112          *     more outstanding pkts
2113          *
2114          *  This test makes sure that when a flow ends (i.e. all packets
2115          *  have been completed for that flow), that the flow can be moved
2116          *  to a different CQ when new packets come in for that flow.
2117          */
2118         static uint32_t flows1[] = {0, 1, 1, 2};
2119
2120         for (i = 0; i < RTE_DIM(flows1); i++) {
2121                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2122                 struct rte_event ev = {
2123                                 .flow_id = flows1[i],
2124                                 .op = RTE_EVENT_OP_NEW,
2125                                 .queue_id = t->qid[0],
2126                                 .event_type = RTE_EVENT_TYPE_CPU,
2127                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2128                                 .mbuf = arp
2129                 };
2130
2131                 if (!arp) {
2132                         printf("%d: gen of pkt failed\n", __LINE__);
2133                         return -1;
2134                 }
2135                 arp->hash.rss = flows1[i];
2136                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2137                 if (err != 1) {
2138                         printf("%d: Failed to enqueue\n", __LINE__);
2139                         return -1;
2140                 }
2141         }
2142
2143         /* call the scheduler */
2144         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2145
2146         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2147         struct rte_event ev;
2148         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2149                 printf("%d: failed to dequeue\n", __LINE__);
2150                 return -1;
2151         }
2152         if (ev.mbuf->hash.rss != flows1[0]) {
2153                 printf("%d: unexpected flow received\n", __LINE__);
2154                 return -1;
2155         }
2156
2157         /* drop the flow 0 packet from port 1 */
2158         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2159
2160         /* call the scheduler */
2161         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2162
2163         /*
2164          * Set up the next set of flows, first a new flow to fill up
2165          * CQ 0, so that the next flow 0 packet should go to CQ2
2166          */
2167         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
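        /*
         * Expected placement for this second round, as read from the checks
         * below: new flow 3 fills the now-empty CQ of port 1 (3 events),
         * flow 1 stays pinned to port 2 (total 4), and flow 0, whose earlier
         * event has completed, is re-balanced onto port 3 (total 2).
         */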
2168
2169         for (i = 0; i < RTE_DIM(flows2); i++) {
2170                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2171                 struct rte_event ev = {
2172                                 .flow_id = flows2[i],
2173                                 .op = RTE_EVENT_OP_NEW,
2174                                 .queue_id = t->qid[0],
2175                                 .event_type = RTE_EVENT_TYPE_CPU,
2176                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2177                                 .mbuf = arp
2178                 };
2179
2180                 if (!arp) {
2181                         printf("%d: gen of pkt failed\n", __LINE__);
2182                         return -1;
2183                 }
2184                 arp->hash.rss = flows2[i];
2185
2186                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2187                 if (err != 1) {
2188                         printf("%d: Failed to enqueue\n", __LINE__);
2189                         return -1;
2190                 }
2191         }
2192
2193         /* schedule */
2194         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2195
2196         err = test_event_dev_stats_get(evdev, &stats);
2197         if (err) {
2198                 printf("%d: failed to get stats\n", __LINE__);
2199                 return -1;
2200         }
2201
2202         /*
2203          * Now check the resulting inflights on each port.
2204          */
2205         if (stats.port_inflight[1] != 3) {
2206                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2207                                 __func__);
2208                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2209                                 (unsigned int)stats.port_inflight[1],
2210                                 (unsigned int)stats.port_inflight[2],
2211                                 (unsigned int)stats.port_inflight[3]);
2212                 return -1;
2213         }
2214         if (stats.port_inflight[2] != 4) {
2215                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2216                                 __func__);
2217                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2218                                 (unsigned int)stats.port_inflight[1],
2219                                 (unsigned int)stats.port_inflight[2],
2220                                 (unsigned int)stats.port_inflight[3]);
2221                 return -1;
2222         }
2223         if (stats.port_inflight[3] != 2) {
2224                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2225                                 __func__);
2226                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2227                                 (unsigned int)stats.port_inflight[1],
2228                                 (unsigned int)stats.port_inflight[2],
2229                                 (unsigned int)stats.port_inflight[3]);
2230                 return -1;
2231         }
2232
2233         for (i = 1; i <= 3; i++) {
2234                 struct rte_event ev;
2235                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2236                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2237         }
2238         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2239
2240         cleanup(t);
2241         return 0;
2242 }
2243
2244 static int
2245 invalid_qid(struct test *t)
2246 {
2247         struct test_event_dev_stats stats;
2248         const int rx_enq = 0;
2249         int err;
2250         uint32_t i;
2251
2252         if (init(t, 1, 4) < 0 ||
2253                         create_ports(t, 4) < 0 ||
2254                         create_atomic_qids(t, 1) < 0) {
2255                 printf("%d: Error initializing device\n", __LINE__);
2256                 return -1;
2257         }
2258
2259         /* CQ mapping to QID */
2260         for (i = 0; i < 4; i++) {
2261                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2262                                 NULL, 1);
2263                 if (err != 1) {
2264                         printf("%d: error mapping port 1 qid\n", __LINE__);
2265                         return -1;
2266                 }
2267         }
2268
2269         if (rte_event_dev_start(evdev) < 0) {
2270                 printf("%d: Error with start call\n", __LINE__);
2271                 return -1;
2272         }
2273
2274         /*
2275          * Send in a packet with an invalid qid to the scheduler.
2276          * We should see the packet enqueued OK, but the inflights for
2277          * that packet should not be incremented, and the rx_dropped
2278          * should be incremented.
2279          */
2280         static uint32_t flows1[] = {20};
2281
2282         for (i = 0; i < RTE_DIM(flows1); i++) {
2283                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2284                 if (!arp) {
2285                         printf("%d: gen of pkt failed\n", __LINE__);
2286                         return -1;
2287                 }
2288
2289                 struct rte_event ev = {
2290                                 .op = RTE_EVENT_OP_NEW,
2291                                 .queue_id = t->qid[0] + flows1[i],
2292                                 .flow_id = i,
2293                                 .mbuf = arp,
2294                 };
2295                 /* generate pkt and enqueue */
2296                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2297                 if (err != 1) {
2298                         printf("%d: Failed to enqueue\n", __LINE__);
2299                         return -1;
2300                 }
2301         }
2302
2303         /* call the scheduler */
2304         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2305
2306         err = test_event_dev_stats_get(evdev, &stats);
2307         if (err) {
2308                 printf("%d: failed to get stats\n", __LINE__);
2309                 return -1;
2310         }
2311
2312         /*
2313          * Now check the resulting inflights on the port, and the rx_dropped.
2314          */
2315         if (stats.port_inflight[0] != 0) {
2316                 printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2317                                 __func__);
2318                 rte_event_dev_dump(evdev, stdout);
2319                 return -1;
2320         }
2321         if (stats.port_rx_dropped[0] != 1) {
2322                 printf("%d:%s: port 0 rx_dropped count not correct\n", __LINE__, __func__);
2323                 rte_event_dev_dump(evdev, stdout);
2324                 return -1;
2325         }
2326         /* each packet drop should only be counted in one place - port or dev */
2327         if (stats.rx_dropped != 0) {
2328                 printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2329                                 __func__);
2330                 rte_event_dev_dump(evdev, stdout);
2331                 return -1;
2332         }
2333
2334         cleanup(t);
2335         return 0;
2336 }
2337
2338 static int
2339 single_packet(struct test *t)
2340 {
2341         const uint32_t MAGIC_SEQN = 7321;
2342         struct rte_event ev;
2343         struct test_event_dev_stats stats;
2344         const int rx_enq = 0;
2345         const int wrk_enq = 2;
2346         int err;
2347
2348         /* Create instance with 4 ports */
2349         if (init(t, 1, 4) < 0 ||
2350                         create_ports(t, 4) < 0 ||
2351                         create_atomic_qids(t, 1) < 0) {
2352                 printf("%d: Error initializing device\n", __LINE__);
2353                 return -1;
2354         }
2355
2356         /* CQ mapping to QID */
2357         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2358         if (err != 1) {
2359                 printf("%d: error mapping lb qid\n", __LINE__);
2360                 cleanup(t);
2361                 return -1;
2362         }
2363
2364         if (rte_event_dev_start(evdev) < 0) {
2365                 printf("%d: Error with start call\n", __LINE__);
2366                 return -1;
2367         }
2368
2369         /************** Gen pkt and enqueue ****************/
2370         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2371         if (!arp) {
2372                 printf("%d: gen of pkt failed\n", __LINE__);
2373                 return -1;
2374         }
2375
2376         ev.op = RTE_EVENT_OP_NEW;
2377         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2378         ev.mbuf = arp;
2379         ev.queue_id = 0;
2380         ev.flow_id = 3;
2381         *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
2382
2383         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2384         if (err != 1) {
2385                 printf("%d: Failed to enqueue\n", __LINE__);
2386                 return -1;
2387         }
2388
2389         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2390
2391         err = test_event_dev_stats_get(evdev, &stats);
2392         if (err) {
2393                 printf("%d: failed to get stats\n", __LINE__);
2394                 return -1;
2395         }
2396
2397         if (stats.rx_pkts != 1 ||
2398                         stats.tx_pkts != 1 ||
2399                         stats.port_inflight[wrk_enq] != 1) {
2400                 printf("%d: Sched core didn't handle pkt as expected\n",
2401                                 __LINE__);
2402                 rte_event_dev_dump(evdev, stdout);
2403                 return -1;
2404         }
2405
2406         uint32_t deq_pkts;
2407
2408         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2409         if (deq_pkts < 1) {
2410                 printf("%d: Failed to deq\n", __LINE__);
2411                 return -1;
2412         }
2413
2414         err = test_event_dev_stats_get(evdev, &stats);
2415         if (err) {
2416                 printf("%d: failed to get stats\n", __LINE__);
2417                 return -1;
2418         }
2419
2421         if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
2422                 printf("%d: magic sequence number not dequeued\n", __LINE__);
2423                 return -1;
2424         }
2425
2426         rte_pktmbuf_free(ev.mbuf);
2427         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2428         if (err != 1) {
2429                 printf("%d: Failed to enqueue\n", __LINE__);
2430                 return -1;
2431         }
2432         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2433
2434         err = test_event_dev_stats_get(evdev, &stats);
2435         if (stats.port_inflight[wrk_enq] != 0) {
2436                 printf("%d: port inflight not correct\n", __LINE__);
2437                 return -1;
2438         }
2439
2440         cleanup(t);
2441         return 0;
2442 }
2443
2444 static int
2445 inflight_counts(struct test *t)
2446 {
2447         struct rte_event ev;
2448         struct test_event_dev_stats stats;
2449         const int rx_enq = 0;
2450         const int p1 = 1;
2451         const int p2 = 2;
2452         int err;
2453         int i;
2454
2455         /* Create instance with 3 ports and 2 QIDs */
2456         if (init(t, 2, 3) < 0 ||
2457                         create_ports(t, 3) < 0 ||
2458                         create_atomic_qids(t, 2) < 0) {
2459                 printf("%d: Error initializing device\n", __LINE__);
2460                 return -1;
2461         }
2462
2463         /* CQ mapping to QID */
2464         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2465         if (err != 1) {
2466                 printf("%d: error mapping lb qid\n", __LINE__);
2467                 cleanup(t);
2468                 return -1;
2469         }
2470         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2471         if (err != 1) {
2472                 printf("%d: error mapping lb qid\n", __LINE__);
2473                 cleanup(t);
2474                 return -1;
2475         }
2476
2477         if (rte_event_dev_start(evdev) < 0) {
2478                 printf("%d: Error with start call\n", __LINE__);
2479                 return -1;
2480         }
2481
2482         /************** FORWARD ****************/
2483 #define QID1_NUM 5
2484         for (i = 0; i < QID1_NUM; i++) {
2485                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2486
2487                 if (!arp) {
2488                         printf("%d: gen of pkt failed\n", __LINE__);
2489                         goto err;
2490                 }
2491
2492                 ev.queue_id =  t->qid[0];
2493                 ev.op = RTE_EVENT_OP_NEW;
2494                 ev.mbuf = arp;
2495                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2496                 if (err != 1) {
2497                         printf("%d: Failed to enqueue\n", __LINE__);
2498                         goto err;
2499                 }
2500         }
2501 #define QID2_NUM 3
2502         for (i = 0; i < QID2_NUM; i++) {
2503                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2504
2505                 if (!arp) {
2506                         printf("%d: gen of pkt failed\n", __LINE__);
2507                         goto err;
2508                 }
2509                 ev.queue_id =  t->qid[1];
2510                 ev.op = RTE_EVENT_OP_NEW;
2511                 ev.mbuf = arp;
2512                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2513                 if (err != 1) {
2514                         printf("%d: Failed to enqueue\n", __LINE__);
2515                         goto err;
2516                 }
2517         }
2518
2519         /* schedule */
2520         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2521
2522         err = test_event_dev_stats_get(evdev, &stats);
2523         if (err) {
2524                 printf("%d: failed to get stats\n", __LINE__);
2525                 goto err;
2526         }
2527
2528         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2529                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
2530                 printf("%d: Sched core didn't handle pkt as expected\n",
2531                                 __LINE__);
2532                 goto err;
2533         }
2534
2535         if (stats.port_inflight[p1] != QID1_NUM) {
2536                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2537                                 __func__);
2538                 goto err;
2539         }
2540         if (stats.port_inflight[p2] != QID2_NUM) {
2541                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2542                                 __func__);
2543                 goto err;
2544         }
2545
2546         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2547         /* port 1 */
2548         struct rte_event events[QID1_NUM + QID2_NUM];
2549         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2550                         RTE_DIM(events), 0);
2551
2552         if (deq_pkts != QID1_NUM) {
2553                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2554                 goto err;
2555         }
2556         err = test_event_dev_stats_get(evdev, &stats);
2557         if (stats.port_inflight[p1] != QID1_NUM) {
2558                 printf("%d: port 1 inflight should not change after DEQ\n",
2559                                 __LINE__);
2560                 goto err;
2561         }
2562         for (i = 0; i < QID1_NUM; i++) {
2563                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2564                                 1);
2565                 if (err != 1) {
2566                         printf("%d: %s rte enqueue of inf release failed\n",
2567                                 __LINE__, __func__);
2568                         goto err;
2569                 }
2570         }
2571
2572         /*
2573          * The scheduler core is what decrements inflights, so it must run
2574          * again to process the release (drop) events just enqueued
2575          */
2576         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2577
2578         err = test_event_dev_stats_get(evdev, &stats);
2579         if (stats.port_inflight[p1] != 0) {
2580                 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2581                 goto err;
2582         }
2583
2584         /* port2 */
2585         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2586                         RTE_DIM(events), 0);
2587         if (deq_pkts != QID2_NUM) {
2588                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2589                 goto err;
2590         }
2591         err = test_event_dev_stats_get(evdev, &stats);
2592         if (stats.port_inflight[p2] != QID2_NUM) {
2593                 printf("%d: port 2 inflight should not change after DEQ\n",
2594                                 __LINE__);
2595                 goto err;
2596         }
2597         for (i = 0; i < QID2_NUM; i++) {
2598                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2599                                 1);
2600                 if (err != 1) {
2601                         printf("%d: %s rte enqueue of inf release failed\n",
2602                                 __LINE__, __func__);
2603                         goto err;
2604                 }
2605         }
2606
2607         /*
2608          * The scheduler core is what decrements inflights, so it must run
2609          * again to process the release (drop) events just enqueued
2610          */
2611         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2612
2613         err = test_event_dev_stats_get(evdev, &stats);
2614         if (stats.port_inflight[p2] != 0) {
2615                 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2616                 goto err;
2617         }
2618         cleanup(t);
2619         return 0;
2620
2621 err:
2622         rte_event_dev_dump(evdev, stdout);
2623         cleanup(t);
2624         return -1;
2625 }
2626
2627 static int
2628 parallel_basic(struct test *t, int check_order)
2629 {
2630         const uint8_t rx_port = 0;
2631         const uint8_t w1_port = 1;
2632         const uint8_t w3_port = 3;
2633         const uint8_t tx_port = 4;
2634         int err;
2635         int i;
2636         uint32_t deq_pkts, j;
2637         struct rte_mbuf *mbufs[3];
2639         const uint32_t MAGIC_SEQN = 1234;
2640
2641         /* Create instance with 5 ports (rx, 3 workers, tx) and 2 QIDs */
2642         if (init(t, 2, tx_port + 1) < 0 ||
2643                         create_ports(t, tx_port + 1) < 0 ||
2644                         (check_order ?  create_ordered_qids(t, 1) :
2645                                 create_unordered_qids(t, 1)) < 0 ||
2646                         create_directed_qids(t, 1, &tx_port)) {
2647                 printf("%d: Error initializing device\n", __LINE__);
2648                 return -1;
2649         }
2650
2651         /*
2652          * CQ mapping to QID
2653          * We need three ports, all mapped to the same ordered qid0. Then we'll
2654          * take a packet out to each port, re-enqueue in reverse order,
2655          * then make sure the reordering has taken place properly when we
2656          * dequeue from the tx_port.
2657          *
2658          * Simplified test setup diagram:
2659          *
2660          * rx_port        w1_port
2661          *        \     /         \
2662          *         qid0 - w2_port - qid1
2663          *              \         /     \
2664          *                w3_port        tx_port
2665          */
2666         /* CQ mapping to QID for LB ports (directed mapped on create) */
2667         for (i = w1_port; i <= w3_port; i++) {
2668                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2669                                 1);
2670                 if (err != 1) {
2671                         printf("%d: error mapping lb qid\n", __LINE__);
2672                         cleanup(t);
2673                         return -1;
2674                 }
2675         }
2676
2677         if (rte_event_dev_start(evdev) < 0) {
2678                 printf("%d: Error with start call\n", __LINE__);
2679                 return -1;
2680         }
2681
2682         /* Enqueue 3 packets to the rx port */
2683         for (i = 0; i < 3; i++) {
2684                 struct rte_event ev;
2685                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2686                 if (!mbufs[i]) {
2687                         printf("%d: gen of pkt failed\n", __LINE__);
2688                         return -1;
2689                 }
2690
2691                 ev.queue_id = t->qid[0];
2692                 ev.op = RTE_EVENT_OP_NEW;
2693                 ev.mbuf = mbufs[i];
2694                 *rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
2695
2696                 /* generate pkt and enqueue */
2697                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2698                 if (err != 1) {
2699                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2700                                         __LINE__, i, err);
2701                         return -1;
2702                 }
2703         }
2704
2705         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2706
2707         /* use extra slot to make logic in loops easier */
2708         struct rte_event deq_ev[w3_port + 1];
2709
2710         /* Dequeue the 3 packets, one from each worker port */
2711         for (i = w1_port; i <= w3_port; i++) {
2712                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2713                                 &deq_ev[i], 1, 0);
2714                 if (deq_pkts != 1) {
2715                         printf("%d: Failed to deq\n", __LINE__);
2716                         rte_event_dev_dump(evdev, stdout);
2717                         return -1;
2718                 }
2719         }
2720
2721         /* Enqueue each packet in reverse order, flushing after each one */
2722         for (i = w3_port; i >= w1_port; i--) {
2723
2724                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2725                 deq_ev[i].queue_id = t->qid[1];
2726                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2727                 if (err != 1) {
2728                         printf("%d: Failed to enqueue\n", __LINE__);
2729                         return -1;
2730                 }
2731         }
2732         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2733
2734         /* dequeue from the tx ports, we should get 3 packets */
2735         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2736                         3, 0);
2737
2738         /* Check to see if we've got all 3 packets */
2739         if (deq_pkts != 3) {
2740                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2741                         __LINE__, deq_pkts, tx_port);
2742                 rte_event_dev_dump(evdev, stdout);
2743                 return -1;
2744         }
2745
2746         /* Check to see if the sequence numbers are in expected order */
2747         if (check_order) {
2748                 for (j = 0 ; j < deq_pkts ; j++) {
2749                         if (*rte_event_pmd_selftest_seqn(deq_ev[j].mbuf) !=
2750                                         MAGIC_SEQN + j) {
2751                                 printf("%d: Incorrect sequence number(%d) from port %d\n",
2752                                         __LINE__,
2753                                         *rte_event_pmd_selftest_seqn(deq_ev[j].mbuf),
2754                                         tx_port);
2755                                 return -1;
2756                         }
2757                 }
2758         }
2759
2760         /* Destroy the instance */
2761         cleanup(t);
2762         return 0;
2763 }
2764
2765 static int
2766 ordered_basic(struct test *t)
2767 {
2768         return parallel_basic(t, 1);
2769 }
2770
2771 static int
2772 unordered_basic(struct test *t)
2773 {
2774         return parallel_basic(t, 0);
2775 }
2776
2777 static int
2778 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2779 {
2780         const struct rte_event new_ev = {
2781                         .op = RTE_EVENT_OP_NEW
2782                         /* all other fields zero */
2783         };
2784         struct rte_event ev = new_ev;
2785         unsigned int rx_port = 0; /* port we get the first flow on */
2786         char rx_port_used_stat[64];
2787         char rx_port_free_stat[64];
2788         char other_port_used_stat[64];
2789
2790         if (init(t, 1, 2) < 0 ||
2791                         create_ports(t, 2) < 0 ||
2792                         create_atomic_qids(t, 1) < 0) {
2793                 printf("%d: Error initializing device\n", __LINE__);
2794                 return -1;
2795         }
2796         int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2797         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2798                         nb_links != 1) {
2799                 printf("%d: Error linking queue to ports\n", __LINE__);
2800                 goto err;
2801         }
2802         if (rte_event_dev_start(evdev) < 0) {
2803                 printf("%d: Error with start call\n", __LINE__);
2804                 goto err;
2805         }
2806
2807         /* send one packet and see where it goes, port 0 or 1 */
2808         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2809                 printf("%d: Error doing first enqueue\n", __LINE__);
2810                 goto err;
2811         }
2812         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2813
2814         if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2815                         != 1)
2816                 rx_port = 1;
2817
2818         snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2819                         "port_%u_cq_ring_used", rx_port);
2820         snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2821                         "port_%u_cq_ring_free", rx_port);
2822         snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2823                         "port_%u_cq_ring_used", rx_port ^ 1);
2824         if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2825                         != 1) {
2826                 printf("%d: Error, first event not scheduled\n", __LINE__);
2827                 goto err;
2828         }
2829
2830         /* now fill up the rx port's queue with one flow to cause HOLB */
2831         do {
2832                 ev = new_ev;
2833                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2834                         printf("%d: Error with enqueue\n", __LINE__);
2835                         goto err;
2836                 }
2837                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2838         } while (rte_event_dev_xstats_by_name_get(evdev,
2839                                 rx_port_free_stat, NULL) != 0);
2840
2841         /* one more packet, which needs to stay in IQ - i.e. HOLB */
2842         ev = new_ev;
2843         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2844                 printf("%d: Error with enqueue\n", __LINE__);
2845                 goto err;
2846         }
2847         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2848
2849         /* check that the other port still has an empty CQ */
2850         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2851                         != 0) {
2852                 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2853                 goto err;
2854         }
2855         /* check IQ now has one packet */
2856         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2857                         != 1) {
2858                 printf("%d: Error, QID does not have exactly 1 packet\n",
2859                         __LINE__);
2860                 goto err;
2861         }
2862
2863         /* send another flow, which should pass the other IQ entry */
2864         ev = new_ev;
2865         ev.flow_id = 1;
2866         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2867                 printf("%d: Error with enqueue\n", __LINE__);
2868                 goto err;
2869         }
2870         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2871
2872         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2873                         != 1) {
2874                 printf("%d: Error, second flow did not overtake the first\n",
2875                         __LINE__);
2876                 goto err;
2877         }
2878
2879         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2880                         != 1) {
2881                 printf("%d: Error, QID does not have exactly 1 packet\n",
2882                         __LINE__);
2883                 goto err;
2884         }
2885         cleanup(t);
2886         return 0;
2887 err:
2888         rte_event_dev_dump(evdev, stdout);
2889         cleanup(t);
2890         return -1;
2891 }
2892
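/*
 * Stop-flush callback used by dev_stop_flush(): for every event still inside
 * the device when it is stopped, bump the user-supplied counter if the event
 * carries the 0xCA11BACC marker enqueued by the test.
 */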
2893 static void
2894 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2895 {
2896         *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
2897 }
2898
2899 static int
2900 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2901 {
2902         const struct rte_event new_ev = {
2903                 .op = RTE_EVENT_OP_NEW,
2904                 .u64 = 0xCA11BACC,
2905                 .queue_id = 0
2906         };
2907         struct rte_event ev = new_ev;
2908         uint8_t count = 0;
2909         int i;
2910
2911         if (init(t, 1, 1) < 0 ||
2912             create_ports(t, 1) < 0 ||
2913             create_atomic_qids(t, 1) < 0) {
2914                 printf("%d: Error initializing device\n", __LINE__);
2915                 return -1;
2916         }
2917
2918         /* Link the queue so *_start() doesn't error out */
2919         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2920                 printf("%d: Error linking queue to port\n", __LINE__);
2921                 goto err;
2922         }
2923
2924         if (rte_event_dev_start(evdev) < 0) {
2925                 printf("%d: Error with start call\n", __LINE__);
2926                 goto err;
2927         }
2928
2929         for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2930                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2931                         printf("%d: Error enqueuing events\n", __LINE__);
2932                         goto err;
2933                 }
2934         }
2935
2936         /* Schedule the events from the port to the IQ. At least one event
2937          * should be remaining in the queue.
2938          */
2939         rte_service_run_iter_on_app_lcore(t->service_id, 1);
2940
2941         if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2942                 printf("%d: Error installing the flush callback\n", __LINE__);
2943                 goto err;
2944         }
2945
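        /* cleanup() stops and tears down the device; stopping it should invoke
         * the callback registered above for each event still held inside,
         * which is what the count check below relies on.
         */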
2946         cleanup(t);
2947
2948         if (count == 0) {
2949                 printf("%d: Error executing the flush callback\n", __LINE__);
2950                 goto err;
2951         }
2952
2953         if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2954                 printf("%d: Error uninstalling the flush callback\n", __LINE__);
2955                 goto err;
2956         }
2957
2958         return 0;
2959 err:
2960         rte_event_dev_dump(evdev, stdout);
2961         cleanup(t);
2962         return -1;
2963 }
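/*
 * For reference, a minimal sketch (not part of the selftest) of how an
 * application would use the same hook: register a callback with the signature
 * of flush() above before stopping the device, and each event still buffered
 * inside the eventdev is passed to it from within rte_event_dev_stop().
 *
 *     uint8_t marked = 0;
 *     rte_event_dev_stop_flush_callback_register(dev_id, flush, &marked);
 *     rte_event_dev_stop(dev_id);
 *     printf("marked events flushed at stop: %u\n", (unsigned int)marked);
 */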
2964
2965 static int
2966 worker_loopback_worker_fn(void *arg)
2967 {
2968         struct test *t = arg;
2969         uint8_t port = t->port[1];
2970         int count = 0;
2971         int enqd;
2972
2973         /*
2974          * Takes packets from the input port and then loops them back through
2975          * the Eventdev. Each packet is looped through QIDs 0-7, 16 times,
2976          * so each packet passes through the scheduler 8*16 = 128 times.
2977          */
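        /*
         * Per-event path through the code below: a NEW event arrives on QID 0,
         * is FORWARDed through QIDs 1..7, wraps back to QID 0 with the
         * per-mbuf counter incremented, and after 16 such laps the mbuf is
         * freed and the event is RELEASEd.
         */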
2978         printf("%d: \tWorker function started\n", __LINE__);
2979         while (count < NUM_PACKETS) {
2980 #define BURST_SIZE 32
2981                 struct rte_event ev[BURST_SIZE];
2982                 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2983                                 BURST_SIZE, 0);
2984                 if (nb_rx == 0) {
2985                         rte_pause();
2986                         continue;
2987                 }
2988
2989                 for (i = 0; i < nb_rx; i++) {
2990                         ev[i].queue_id++;
2991                         if (ev[i].queue_id != 8) {
2992                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2993                                 enqd = rte_event_enqueue_burst(evdev, port,
2994                                                 &ev[i], 1);
2995                                 if (enqd != 1) {
2996                                         printf("%d: Can't enqueue FWD!!\n",
2997                                                         __LINE__);
2998                                         return -1;
2999                                 }
3000                                 continue;
3001                         }
3002
3003                         ev[i].queue_id = 0;
3004                         (*counter_field(ev[i].mbuf))++;
3005                         if (*counter_field(ev[i].mbuf) != 16) {
3006                                 ev[i].op = RTE_EVENT_OP_FORWARD;
3007                                 enqd = rte_event_enqueue_burst(evdev, port,
3008                                                 &ev[i], 1);
3009                                 if (enqd != 1) {
3010                                         printf("%d: Can't enqueue FWD!!\n",
3011                                                         __LINE__);
3012                                         return -1;
3013                                 }
3014                                 continue;
3015                         }
3016                         /* we have hit 16 iterations through system - drop */
3017                         rte_pktmbuf_free(ev[i].mbuf);
3018                         count++;
3019                         ev[i].op = RTE_EVENT_OP_RELEASE;
3020                         enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
3021                         if (enqd != 1) {
3022                                 printf("%d drop enqueue failed\n", __LINE__);
3023                                 return -1;
3024                         }
3025                 }
3026         }
3027
3028         return 0;
3029 }
3030
3031 static int
3032 worker_loopback_producer_fn(void *arg)
3033 {
3034         struct test *t = arg;
3035         uint8_t port = t->port[0];
3036         uint64_t count = 0;
3037
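        /*
         * Allocate an mbuf per event, zero its loop counter and inject it as
         * an OP_NEW event on QID 0; the flow id is derived from the mbuf
         * address so the packets spread across many atomic flows. The enqueue
         * is retried until accepted, e.g. when the RX port's new-event
         * threshold pushes back.
         */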
3038         printf("%d: \tProducer function started\n", __LINE__);
3039         while (count < NUM_PACKETS) {
3040                 struct rte_mbuf *m = NULL;
3041                 do {
3042                         m = rte_pktmbuf_alloc(t->mbuf_pool);
3043                 } while (m == NULL);
3044
3045                 *counter_field(m) = 0;
3046
3047                 struct rte_event ev = {
3048                                 .op = RTE_EVENT_OP_NEW,
3049                                 .queue_id = t->qid[0],
3050                                 .flow_id = (uintptr_t)m & 0xFFFF,
3051                                 .mbuf = m,
3052                 };
3053
3054                 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
3055                         while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
3056                                         1)
3057                                 rte_pause();
3058                 }
3059
3060                 count++;
3061         }
3062
3063         return 0;
3064 }
3065
3066 static int
3067 worker_loopback(struct test *t, uint8_t disable_implicit_release)
3068 {
3069         /* use a single producer core, and a worker core to see what happens
3070          * if the worker loops packets back multiple times
3071          */
3072         struct test_event_dev_stats stats;
3073         uint64_t print_cycles = 0, cycles = 0;
3074         uint64_t tx_pkts = 0;
3075         int err;
3076         int w_lcore, p_lcore;
3077
3078         static const struct rte_mbuf_dynfield counter_dynfield_desc = {
3079                 .name = "rte_event_sw_dynfield_selftest_counter",
3080                 .size = sizeof(counter_dynfield_t),
3081                 .align = __alignof__(counter_dynfield_t),
3082         };
3083         counter_dynfield_offset =
3084                 rte_mbuf_dynfield_register(&counter_dynfield_desc);
3085         if (counter_dynfield_offset < 0) {
3086                 printf("Error registering mbuf field\n");
3087                 return -rte_errno;
3088         }
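        /* The dynfield holds the per-mbuf loop counter used by the worker;
         * registering the same field again on a later run should simply return
         * the already-allocated offset, so repeated selftest runs are fine.
         */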
3089
3090         if (init(t, 8, 2) < 0 ||
3091                         create_atomic_qids(t, 8) < 0) {
3092                 printf("%d: Error initializing device\n", __LINE__);
3093                 return -1;
3094         }
3095
3096         /* RX with low max events */
3097         static struct rte_event_port_conf conf = {
3098                         .dequeue_depth = 32,
3099                         .enqueue_depth = 64,
3100         };
3101         /* beware: this cannot be initialized in the static above as it would
3102          * only be initialized once - and this needs to be set for multiple runs
3103          */
3104         conf.new_event_threshold = 512;
3105         conf.event_port_cfg = disable_implicit_release ?
3106                 RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
3107
3108         if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3109                 printf("Error setting up RX port\n");
3110                 return -1;
3111         }
3112         t->port[0] = 0;
3113         /* TX with higher max events */
3114         conf.new_event_threshold = 4096;
3115         if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3116                 printf("Error setting up TX port\n");
3117                 return -1;
3118         }
3119         t->port[1] = 1;
3120
3121         /* CQ mapping to QID */
3122         err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3123         if (err != 8) { /* should have mapped all 8 queues */
3124                 printf("%d: error mapping port 1 to all qids\n", __LINE__);
3125                 return -1;
3126         }
3127
3128         if (rte_event_dev_start(evdev) < 0) {
3129                 printf("%d: Error with start call\n", __LINE__);
3130                 return -1;
3131         }
3132
3133         p_lcore = rte_get_next_lcore(
3134                         /* start core */ -1,
3135                         /* skip main */ 1,
3136                         /* wrap */ 0);
3137         w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3138
3139         rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3140         rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3141
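        /* Drive the scheduler service from this lcore while the producer and
         * worker run on their own lcores. Print Rx/Tx progress roughly once a
         * second; if tx_pkts makes no progress for ~3 seconds, dump device
         * state and xstats and fail as a suspected deadlock.
         */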
3142         print_cycles = cycles = rte_get_timer_cycles();
3143         while (rte_eal_get_lcore_state(p_lcore) != WAIT ||
3144                         rte_eal_get_lcore_state(w_lcore) != WAIT) {
3145
3146                 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3147
3148                 uint64_t new_cycles = rte_get_timer_cycles();
3149
3150                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3151                         test_event_dev_stats_get(evdev, &stats);
3152                         printf(
3153                                 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3154                                 __LINE__, stats.rx_pkts, stats.tx_pkts);
3155
3156                         print_cycles = new_cycles;
3157                 }
3158                 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3159                         test_event_dev_stats_get(evdev, &stats);
3160                         if (stats.tx_pkts == tx_pkts) {
3161                                 rte_event_dev_dump(evdev, stdout);
3162                                 printf("Dumping xstats:\n");
3163                                 xstats_print();
3164                                 printf(
3165                                         "%d: No scheduling progress for 3 seconds, assuming deadlock\n",
3166                                         __LINE__);
3167                                 return -1;
3168                         }
3169                         tx_pkts = stats.tx_pkts;
3170                         cycles = new_cycles;
3171                 }
3172         }
3173         /* ensure all completions are flushed */
3174         rte_service_run_iter_on_app_lcore(t->service_id, 1);
3175
3176         rte_eal_mp_wait_lcore();
3177
3178         cleanup(t);
3179         return 0;
3180 }
3181
3182 static struct rte_mempool *eventdev_func_mempool;
3183
3184 int
3185 test_sw_eventdev(void)
3186 {
3187         struct test *t;
3188         int ret;
3189
3190         t = malloc(sizeof(struct test));
3191         if (t == NULL)
3192                 return -1;
3193         /* manually initialize the op; older versions of gcc complain about
3194          * static initialization of struct members that are bitfields.
3195          */
3196         release_ev.op = RTE_EVENT_OP_RELEASE;
3197
3198         const char *eventdev_name = "event_sw";
3199         evdev = rte_event_dev_get_dev_id(eventdev_name);
3200         if (evdev < 0) {
3201                 printf("%d: Eventdev %s not found - creating.\n",
3202                                 __LINE__, eventdev_name);
3203                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3204                         printf("Error creating eventdev\n");
3205                         goto test_fail;
3206                 }
3207                 evdev = rte_event_dev_get_dev_id(eventdev_name);
3208                 if (evdev < 0) {
3209                         printf("Error finding newly created eventdev\n");
3210                         goto test_fail;
3211                 }
3212         }
3213
3214         if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3215                 printf("Failed to get service ID for software event dev\n");
3216                 goto test_fail;
3217         }
3218
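        /* The sw PMD schedules via a service: mark it runnable and disable the
         * lcore-mapping check so the tests can step it manually with
         * rte_service_run_iter_on_app_lcore().
         */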
3219         rte_service_runstate_set(t->service_id, 1);
3220         rte_service_set_runstate_mapped_check(t->service_id, 0);
3221
3222         /* Only create mbuf pool once, reuse for each test run */
3223         if (!eventdev_func_mempool) {
3224                 eventdev_func_mempool = rte_pktmbuf_pool_create(
3225                                 "EVENTDEV_SW_SA_MBUF_POOL",
3226                                 (1<<12), /* 4k buffers */
3227                                 32, /* mbuf cache size */
3228                                 0, /* no app private data */
3229                                 512, /* use very small mbufs */
3230                                 rte_socket_id());
3231                 if (!eventdev_func_mempool) {
3232                         printf("ERROR creating mempool\n");
3233                         goto test_fail;
3234                 }
3235         }
3236         t->mbuf_pool = eventdev_func_mempool;
3237         printf("*** Running Single Directed Packet test...\n");
3238         ret = test_single_directed_packet(t);
3239         if (ret != 0) {
3240                 printf("ERROR - Single Directed Packet test FAILED.\n");
3241                 goto test_fail;
3242         }
3243         printf("*** Running Directed Forward Credit test...\n");
3244         ret = test_directed_forward_credits(t);
3245         if (ret != 0) {
3246                 printf("ERROR - Directed Forward Credit test FAILED.\n");
3247                 goto test_fail;
3248         }
3249         printf("*** Running Single Load Balanced Packet test...\n");
3250         ret = single_packet(t);
3251         if (ret != 0) {
3252                 printf("ERROR - Single Packet test FAILED.\n");
3253                 goto test_fail;
3254         }
3255         printf("*** Running Unordered Basic test...\n");
3256         ret = unordered_basic(t);
3257         if (ret != 0) {
3258                 printf("ERROR - Unordered Basic test FAILED.\n");
3259                 goto test_fail;
3260         }
3261         printf("*** Running Ordered Basic test...\n");
3262         ret = ordered_basic(t);
3263         if (ret != 0) {
3264                 printf("ERROR - Ordered Basic test FAILED.\n");
3265                 goto test_fail;
3266         }
3267         printf("*** Running Burst Packets test...\n");
3268         ret = burst_packets(t);
3269         if (ret != 0) {
3270                 printf("ERROR - Burst Packets test FAILED.\n");
3271                 goto test_fail;
3272         }
3273         printf("*** Running Load Balancing test...\n");
3274         ret = load_balancing(t);
3275         if (ret != 0) {
3276                 printf("ERROR - Load Balancing test FAILED.\n");
3277                 goto test_fail;
3278         }
3279         printf("*** Running Prioritized Directed test...\n");
3280         ret = test_priority_directed(t);
3281         if (ret != 0) {
3282                 printf("ERROR - Prioritized Directed test FAILED.\n");
3283                 goto test_fail;
3284         }
3285         printf("*** Running Prioritized Atomic test...\n");
3286         ret = test_priority_atomic(t);
3287         if (ret != 0) {
3288                 printf("ERROR - Prioritized Atomic test FAILED.\n");
3289                 goto test_fail;
3290         }
3291
3292         printf("*** Running Prioritized Ordered test...\n");
3293         ret = test_priority_ordered(t);
3294         if (ret != 0) {
3295                 printf("ERROR - Prioritized Ordered test FAILED.\n");
3296                 goto test_fail;
3297         }
3298         printf("*** Running Prioritized Unordered test...\n");
3299         ret = test_priority_unordered(t);
3300         if (ret != 0) {
3301                 printf("ERROR - Prioritized Unordered test FAILED.\n");
3302                 goto test_fail;
3303         }
3304         printf("*** Running Invalid QID test...\n");
3305         ret = invalid_qid(t);
3306         if (ret != 0) {
3307                 printf("ERROR - Invalid QID test FAILED.\n");
3308                 goto test_fail;
3309         }
3310         printf("*** Running Load Balancing History test...\n");
3311         ret = load_balancing_history(t);
3312         if (ret != 0) {
3313                 printf("ERROR - Load Balancing History test FAILED.\n");
3314                 goto test_fail;
3315         }
3316         printf("*** Running Inflight Count test...\n");
3317         ret = inflight_counts(t);
3318         if (ret != 0) {
3319                 printf("ERROR - Inflight Count test FAILED.\n");
3320                 goto test_fail;
3321         }
3322         printf("*** Running Abuse Inflights test...\n");
3323         ret = abuse_inflights(t);
3324         if (ret != 0) {
3325                 printf("ERROR - Abuse Inflights test FAILED.\n");
3326                 goto test_fail;
3327         }
3328         printf("*** Running XStats test...\n");
3329         ret = xstats_tests(t);
3330         if (ret != 0) {
3331                 printf("ERROR - XStats test FAILED.\n");
3332                 goto test_fail;
3333         }
3334         printf("*** Running XStats ID Reset test...\n");
3335         ret = xstats_id_reset_tests(t);
3336         if (ret != 0) {
3337                 printf("ERROR - XStats ID Reset test FAILED.\n");
3338                 goto test_fail;
3339         }
3340         printf("*** Running XStats Brute Force test...\n");
3341         ret = xstats_brute_force(t);
3342         if (ret != 0) {
3343                 printf("ERROR - XStats Brute Force test FAILED.\n");
3344                 goto test_fail;
3345         }
3346         printf("*** Running XStats ID Abuse test...\n");
3347         ret = xstats_id_abuse_tests(t);
3348         if (ret != 0) {
3349                 printf("ERROR - XStats ID Abuse test FAILED.\n");
3350                 goto test_fail;
3351         }
3352         printf("*** Running QID Priority test...\n");
3353         ret = qid_priorities(t);
3354         if (ret != 0) {
3355                 printf("ERROR - QID Priority test FAILED.\n");
3356                 goto test_fail;
3357         }
3358         printf("*** Running Unlink-in-progress test...\n");
3359         ret = unlink_in_progress(t);
3360         if (ret != 0) {
3361                 printf("ERROR - Unlink in progress test FAILED.\n");
3362                 goto test_fail;
3363         }
3364         printf("*** Running Ordered Reconfigure test...\n");
3365         ret = ordered_reconfigure(t);
3366         if (ret != 0) {
3367                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3368                 goto test_fail;
3369         }
3370         printf("*** Running Port LB Single Reconfig test...\n");
3371         ret = port_single_lb_reconfig(t);
3372         if (ret != 0) {
3373                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3374                 goto test_fail;
3375         }
3376         printf("*** Running Port Reconfig Credits test...\n");
3377         ret = port_reconfig_credits(t);
3378         if (ret != 0) {
3379                 printf("ERROR - Port Reconfig Credits test FAILED.\n");
3380                 goto test_fail;
3381         }
3382         printf("*** Running Head-of-line-blocking test...\n");
3383         ret = holb(t);
3384         if (ret != 0) {
3385                 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3386                 goto test_fail;
3387         }
3388         printf("*** Running Stop Flush test...\n");
3389         ret = dev_stop_flush(t);
3390         if (ret != 0) {
3391                 printf("ERROR - Stop Flush test FAILED.\n");
3392                 goto test_fail;
3393         }
3394         if (rte_lcore_count() >= 3) {
3395                 printf("*** Running Worker loopback test...\n");
3396                 ret = worker_loopback(t, 0);
3397                 if (ret != 0) {
3398                         printf("ERROR - Worker loopback test FAILED.\n");
3399                         goto test_fail;
3400                 }
3401
3402                 printf("*** Running Worker loopback test (implicit release disabled)...\n");
3403                 ret = worker_loopback(t, 1);
3404                 if (ret != 0) {
3405                         printf("ERROR - Worker loopback test FAILED.\n");
3406                         goto test_fail;
3407                 }
3408         } else {
3409                 printf("### Not enough cores for worker loopback tests.\n");
3410                 printf("### Need at least 3 cores for the tests.\n");
3411         }
3412
3413         /*
3414          * Free test instance, leaving mempool initialized, and a pointer to it
3415          * in static eventdev_func_mempool, as it is re-used on re-runs
3416          */
3417         free(t);
3418
3419         printf("SW Eventdev Selftest Successful.\n");
3420         return 0;
3421 test_fail:
3422         free(t);
3423         printf("SW Eventdev Selftest Failed.\n");
3424         return -1;
3425 }