dpdk.git: drivers/event/octeontx2/otx2_evdev_selftest.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include <rte_atomic.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_debug.h>
9 #include <rte_eal.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_hexdump.h>
13 #include <rte_launch.h>
14 #include <rte_lcore.h>
15 #include <rte_mbuf.h>
16 #include <rte_malloc.h>
17 #include <rte_memcpy.h>
18 #include <rte_per_lcore.h>
19 #include <rte_random.h>
20 #include <rte_test.h>
21
22 #include "otx2_evdev.h"
23
24 #define NUM_PACKETS (1024)
25 #define MAX_EVENTS  (1024)
26
27 #define OCTEONTX2_TEST_RUN(setup, teardown, test) \
28         octeontx_test_run(setup, teardown, test, #test)
29
30 static int total;
31 static int passed;
32 static int failed;
33 static int unsupported;
34
35 static int evdev;
36 static struct rte_mempool *eventdev_test_mempool;
37
38 struct event_attr {
39         uint32_t flow_id;
40         uint8_t event_type;
41         uint8_t sub_event_type;
42         uint8_t sched_type;
43         uint8_t queue;
44         uint8_t port;
45 };
46
47 static uint32_t seqn_list_index;
48 static int seqn_list[NUM_PACKETS];
49
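/*
 * Track the order in which sequence numbers are observed by the workers.
 * The ordering tests use seqn_list_check() to verify that the recorded
 * sequence is 0, 1, 2, ... i.e. that ingress order was preserved.
 */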
50 static inline void
51 seqn_list_init(void)
52 {
53         RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
54         memset(seqn_list, 0, sizeof(seqn_list));
55         seqn_list_index = 0;
56 }
57
58 static inline int
59 seqn_list_update(int val)
60 {
61         if (seqn_list_index >= NUM_PACKETS)
62                 return -1;
63
64         seqn_list[seqn_list_index++] = val;
65         rte_smp_wmb();
66         return 0;
67 }
68
69 static inline int
70 seqn_list_check(int limit)
71 {
72         int i;
73
74         for (i = 0; i < limit; i++) {
75                 if (seqn_list[i] != i) {
76                         otx2_err("Seqn mismatch %d %d", seqn_list[i], i);
77                         return -1;
78                 }
79         }
80         return 0;
81 }
82
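/*
 * Arguments passed to each worker lcore: shared outstanding-event count,
 * dequeue timeout, port to poll and sched type to forward with.
 */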
83 struct test_core_param {
84         rte_atomic32_t *total_events;
85         uint64_t dequeue_tmo_ticks;
86         uint8_t port;
87         uint8_t sched_type;
88 };
89
90 static int
91 testsuite_setup(void)
92 {
93         const char *eventdev_name = "event_octeontx2";
94
95         evdev = rte_event_dev_get_dev_id(eventdev_name);
96         if (evdev < 0) {
97                 otx2_err("%d: Eventdev %s not found", __LINE__, eventdev_name);
98                 return -1;
99         }
100         return 0;
101 }
102
103 static void
104 testsuite_teardown(void)
105 {
106         rte_event_dev_close(evdev);
107 }
108
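/*
 * Fill dev_conf with the device's advertised maximums and its minimum
 * dequeue timeout so every test runs with the largest usable configuration.
 */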
109 static inline void
110 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
111                                 struct rte_event_dev_info *info)
112 {
113         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
114         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
115         dev_conf->nb_event_ports = info->max_event_ports;
116         dev_conf->nb_event_queues = info->max_event_queues;
117         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
118         dev_conf->nb_event_port_dequeue_depth =
119                         info->max_event_port_dequeue_depth;
120         dev_conf->nb_event_port_enqueue_depth =
121                         info->max_event_port_enqueue_depth;
124         dev_conf->nb_events_limit =
125                         info->max_num_events;
126 }
127
128 enum {
129         TEST_EVENTDEV_SETUP_DEFAULT,
130         TEST_EVENTDEV_SETUP_PRIORITY,
131         TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
132 };
133
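/*
 * Common fixture: create the test mempool, configure the device, set up all
 * event queues (optionally with decreasing priority from queue 0, or with
 * per-dequeue timeouts enabled), set up and link all ports, then start it.
 */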
134 static inline int
135 _eventdev_setup(int mode)
136 {
137         const char *pool_name = "evdev_octeontx_test_pool";
138         struct rte_event_dev_config dev_conf;
139         struct rte_event_dev_info info;
140         int i, ret;
141
142         /* Create and destroy the pool for each test case to keep it standalone */
143         eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
144                                                         0, 0, 512,
145                                                         rte_socket_id());
146         if (!eventdev_test_mempool) {
147                 otx2_err("ERROR creating mempool");
148                 return -1;
149         }
150
151         ret = rte_event_dev_info_get(evdev, &info);
152         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
153
154         devconf_set_default_sane_values(&dev_conf, &info);
155         if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
156                 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
157
158         ret = rte_event_dev_configure(evdev, &dev_conf);
159         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
160
161         uint32_t queue_count;
162         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
163                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
164                                 "Queue count get failed");
165
166         if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
167                 if (queue_count > 8)
168                         queue_count = 8;
169
170                 /* Configure event queues (0 to n) with
171                  * RTE_EVENT_DEV_PRIORITY_HIGHEST to
172                  * RTE_EVENT_DEV_PRIORITY_LOWEST
173                  */
174                 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
175                                 queue_count;
176                 for (i = 0; i < (int)queue_count; i++) {
177                         struct rte_event_queue_conf queue_conf;
178
179                         ret = rte_event_queue_default_conf_get(evdev, i,
180                                                                &queue_conf);
181                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
182                                                 i);
183                         queue_conf.priority = i * step;
184                         ret = rte_event_queue_setup(evdev, i, &queue_conf);
185                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
186                                                 i);
187                 }
188
189         } else {
190                 /* Configure event queues with default priority */
191                 for (i = 0; i < (int)queue_count; i++) {
192                         ret = rte_event_queue_setup(evdev, i, NULL);
193                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
194                                                 i);
195                 }
196         }
197         /* Configure event ports */
198         uint32_t port_count;
199         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
200                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
201                                 "Port count get failed");
202         for (i = 0; i < (int)port_count; i++) {
203                 ret = rte_event_port_setup(evdev, i, NULL);
204                 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
205                 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
206                 RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
207                                 i);
208         }
209
210         ret = rte_event_dev_start(evdev);
211         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
212
213         return 0;
214 }
215
216 static inline int
217 eventdev_setup(void)
218 {
219         return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
220 }
221
222 static inline int
223 eventdev_setup_priority(void)
224 {
225         return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
226 }
227
228 static inline int
229 eventdev_setup_dequeue_timeout(void)
230 {
231         return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
232 }
233
234 static inline void
235 eventdev_teardown(void)
236 {
237         rte_event_dev_stop(evdev);
238         rte_mempool_free(eventdev_test_mempool);
239 }
240
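/*
 * Stash the event attributes at the start of the mbuf data area so that
 * validate_event() can cross-check them after dequeue, and fill the event
 * itself as a NEW event carrying the mbuf.
 */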
241 static inline void
242 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
243                                  uint32_t flow_id, uint8_t event_type,
244                                  uint8_t sub_event_type, uint8_t sched_type,
245                                  uint8_t queue, uint8_t port)
246 {
247         struct event_attr *attr;
248
249         /* Store the event attributes in mbuf for future reference */
250         attr = rte_pktmbuf_mtod(m, struct event_attr *);
251         attr->flow_id = flow_id;
252         attr->event_type = event_type;
253         attr->sub_event_type = sub_event_type;
254         attr->sched_type = sched_type;
255         attr->queue = queue;
256         attr->port = port;
257
258         ev->flow_id = flow_id;
259         ev->sub_event_type = sub_event_type;
260         ev->event_type = event_type;
261         /* Inject the new event */
262         ev->op = RTE_EVENT_OP_NEW;
263         ev->sched_type = sched_type;
264         ev->queue_id = queue;
265         ev->mbuf = m;
266 }
267
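/*
 * Allocate 'events' mbufs, tag them with increasing sequence numbers and
 * enqueue them one by one as NEW events with the given attributes.
 */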
268 static inline int
269 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
270               uint8_t sched_type, uint8_t queue, uint8_t port,
271               unsigned int events)
272 {
273         struct rte_mbuf *m;
274         unsigned int i;
275
276         for (i = 0; i < events; i++) {
277                 struct rte_event ev = {.event = 0, .u64 = 0};
278
279                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
280                 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
281
282                 *rte_event_pmd_selftest_seqn(m) = i;
283                 update_event_and_validation_attr(m, &ev, flow_id, event_type,
284                                                  sub_event_type, sched_type,
285                                                  queue, port);
286                 rte_event_enqueue_burst(evdev, port, &ev, 1);
287         }
288         return 0;
289 }
290
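/* Poll a drained port a few more times and fail if any event still shows up. */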
291 static inline int
292 check_excess_events(uint8_t port)
293 {
294         uint16_t valid_event;
295         struct rte_event ev;
296         int i;
297
298         /* Check for excess events; try a few times and then exit */
299         for (i = 0; i < 32; i++) {
300                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
301
302                 RTE_TEST_ASSERT_SUCCESS(valid_event,
303                                         "Unexpected valid event=%d",
304                                         *rte_event_pmd_selftest_seqn(ev.mbuf));
305         }
306         return 0;
307 }
308
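/*
 * Inject 'total_events' single events through port 0 with random flow id,
 * sub event type, sched type and destination queue.
 */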
309 static inline int
310 generate_random_events(const unsigned int total_events)
311 {
312         struct rte_event_dev_info info;
313         uint32_t queue_count;
314         unsigned int i;
315         int ret;
316
317         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
318                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
319                                 "Queue count get failed");
320
321         ret = rte_event_dev_info_get(evdev, &info);
322         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
323         for (i = 0; i < total_events; i++) {
324                 ret = inject_events(
325                         rte_rand() % info.max_event_queue_flows /*flow_id */,
326                         RTE_EVENT_TYPE_CPU /* event_type */,
327                         rte_rand() % 256 /* sub_event_type */,
328                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
329                         rte_rand() % queue_count /* queue */,
330                         0 /* port */,
331                         1 /* events */);
332                 if (ret)
333                         return -1;
334         }
335         return ret;
336 }
337
338
339 static inline int
340 validate_event(struct rte_event *ev)
341 {
342         struct event_attr *attr;
343
344         attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
345         RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
346                               "flow_id mismatch enq=%d deq =%d",
347                               attr->flow_id, ev->flow_id);
348         RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
349                               "event_type mismatch enq=%d deq =%d",
350                               attr->event_type, ev->event_type);
351         RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
352                               "sub_event_type mismatch enq=%d deq =%d",
353                               attr->sub_event_type, ev->sub_event_type);
354         RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
355                               "sched_type mismatch enq=%d deq =%d",
356                               attr->sched_type, ev->sched_type);
357         RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
358                               "queue mismatch enq=%d deq =%d",
359                               attr->queue, ev->queue_id);
360         return 0;
361 }
362
363 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
364                                  struct rte_event *ev);
365
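/*
 * Dequeue 'total_events' events from 'port', validating the stored
 * attributes and an optional test-specific callback for each event.
 * A forward-progress counter guards against deadlock.
 */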
366 static inline int
367 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
368 {
369         uint32_t events = 0, forward_progress_cnt = 0, index = 0;
370         uint16_t valid_event;
371         struct rte_event ev;
372         int ret;
373
374         while (1) {
375                 if (++forward_progress_cnt > UINT16_MAX) {
376                         otx2_err("Detected deadlock");
377                         return -1;
378                 }
379
380                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
381                 if (!valid_event)
382                         continue;
383
384                 forward_progress_cnt = 0;
385                 ret = validate_event(&ev);
386                 if (ret)
387                         return -1;
388
389                 if (fn != NULL) {
390                         ret = fn(index, port, &ev);
391                         RTE_TEST_ASSERT_SUCCESS(ret,
392                                 "Failed to validate test specific event");
393                 }
394
395                 ++index;
396
397                 rte_pktmbuf_free(ev.mbuf);
398                 if (++events >= total_events)
399                         break;
400         }
401
402         return check_excess_events(port);
403 }
404
405 static int
406 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
407 {
408         RTE_SET_USED(port);
409         RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
410                 "index=%d != seqn=%d",
411                 index, *rte_event_pmd_selftest_seqn(ev->mbuf));
412         return 0;
413 }
414
415 static inline int
416 test_simple_enqdeq(uint8_t sched_type)
417 {
418         int ret;
419
420         ret = inject_events(0 /*flow_id */,
421                             RTE_EVENT_TYPE_CPU /* event_type */,
422                             0 /* sub_event_type */,
423                             sched_type,
424                             0 /* queue */,
425                             0 /* port */,
426                             MAX_EVENTS);
427         if (ret)
428                 return -1;
429
430         return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
431 }
432
433 static int
434 test_simple_enqdeq_ordered(void)
435 {
436         return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
437 }
438
439 static int
440 test_simple_enqdeq_atomic(void)
441 {
442         return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
443 }
444
445 static int
446 test_simple_enqdeq_parallel(void)
447 {
448         return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
449 }
450
451 /*
452  * Generate a prescribed number of events and spread them across available
453  * queues. On dequeue, verify the enqueued event attributes using a single
454  * event port (port 0).
455  */
456 static int
457 test_multi_queue_enq_single_port_deq(void)
458 {
459         int ret;
460
461         ret = generate_random_events(MAX_EVENTS);
462         if (ret)
463                 return -1;
464
465         return consume_events(0 /* port */, MAX_EVENTS, NULL);
466 }
467
468 /*
469  * Inject 0..MAX_EVENTS events over queues 0..queue_count-1 using a modulo
470  * operation
471  *
472  * For example, Inject 32 events over 0..7 queues
473  * enqueue events 0, 8, 16, 24 in queue 0
474  * enqueue events 1, 9, 17, 25 in queue 1
475  * ..
476  * ..
477  * enqueue events 7, 15, 23, 31 in queue 7
478  *
479  * On dequeue, validate that the events arrive in 0,8,16,24,1,9,17,25..,7,15,23,31
480  * order from queue 0 (highest priority) to queue 7 (lowest priority)
481  */
482 static int
483 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
484 {
485         uint32_t queue_count;
486
487         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
488                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
489                                 "Queue count get failed");
490         if (queue_count > 8)
491                 queue_count = 8;
492         uint32_t range = MAX_EVENTS / queue_count;
493         uint32_t expected_val = (index % range) * queue_count;
494
495         expected_val += ev->queue_id;
496         RTE_SET_USED(port);
497         RTE_TEST_ASSERT_EQUAL(
498                 *rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
499                 "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
500                 *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,
501                 range, queue_count, MAX_EVENTS);
502         return 0;
503 }
504
505 static int
506 test_multi_queue_priority(void)
507 {
508         int i, max_evts_roundoff;
509         /* See validate_queue_priority() comments for the priority validation logic */
510         uint32_t queue_count;
511         struct rte_mbuf *m;
512         uint8_t queue;
513
514         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
515                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
516                                 "Queue count get failed");
517         if (queue_count > 8)
518                 queue_count = 8;
519         max_evts_roundoff  = MAX_EVENTS / queue_count;
520         max_evts_roundoff *= queue_count;
521
522         for (i = 0; i < max_evts_roundoff; i++) {
523                 struct rte_event ev = {.event = 0, .u64 = 0};
524
525                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
526                 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
527
528                 *rte_event_pmd_selftest_seqn(m) = i;
529                 queue = i % queue_count;
530                 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
531                                                  0, RTE_SCHED_TYPE_PARALLEL,
532                                                  queue, 0);
533                 rte_event_enqueue_burst(evdev, 0, &ev, 1);
534         }
535
536         return consume_events(0, max_evts_roundoff, validate_queue_priority);
537 }
538
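/*
 * Worker: drain events from its port, validate and free them, and decrement
 * the shared outstanding-event count until it reaches zero.
 */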
539 static int
540 worker_multi_port_fn(void *arg)
541 {
542         struct test_core_param *param = arg;
543         rte_atomic32_t *total_events = param->total_events;
544         uint8_t port = param->port;
545         uint16_t valid_event;
546         struct rte_event ev;
547         int ret;
548
549         while (rte_atomic32_read(total_events) > 0) {
550                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
551                 if (!valid_event)
552                         continue;
553
554                 ret = validate_event(&ev);
555                 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
556                 rte_pktmbuf_free(ev.mbuf);
557                 rte_atomic32_sub(total_events, 1);
558         }
559
560         return 0;
561 }
562
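/*
 * Spin until the shared event count drops to zero, logging the remaining
 * count about once per second, then wait for all worker lcores to exit.
 */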
563 static inline int
564 wait_workers_to_join(const rte_atomic32_t *count)
565 {
566         uint64_t cycles, print_cycles;
567
568         cycles = rte_get_timer_cycles();
569         print_cycles = cycles;
570         while (rte_atomic32_read(count)) {
571                 uint64_t new_cycles = rte_get_timer_cycles();
572
573                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
574                         otx2_err("Events %d", rte_atomic32_read(count));
575                         print_cycles = new_cycles;
576                 }
577                 if (new_cycles - cycles > rte_get_timer_hz() * 10000000000) {
578                         otx2_err("No schedules for seconds, deadlock (%d)",
579                                  rte_atomic32_read(count));
580                         rte_event_dev_dump(evdev, stdout);
581                         cycles = new_cycles;
582                         return -1;
583                 }
584         }
585         rte_eal_mp_wait_lcore();
586
587         return 0;
588 }
589
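/*
 * Launch 'main_thread' on the first worker lcore (using port 0) and
 * 'worker_thread' on the remaining lcores (ports 1..nb_workers-1), then
 * wait until every injected event has been consumed.
 */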
590 static inline int
591 launch_workers_and_wait(int (*main_thread)(void *),
592                         int (*worker_thread)(void *), uint32_t total_events,
593                         uint8_t nb_workers, uint8_t sched_type)
594 {
595         rte_atomic32_t atomic_total_events;
596         struct test_core_param *param;
597         uint64_t dequeue_tmo_ticks;
598         uint8_t port = 0;
599         int w_lcore;
600         int ret;
601
602         if (!nb_workers)
603                 return 0;
604
605         rte_atomic32_set(&atomic_total_events, total_events);
606         seqn_list_init();
607
608         param = malloc(sizeof(struct test_core_param) * nb_workers);
609         if (!param)
610                 return -1;
611
612         ret = rte_event_dequeue_timeout_ticks(evdev,
613                                               rte_rand() % 10000000/* 10ms */,
614                                               &dequeue_tmo_ticks);
615         if (ret) {
616                 free(param);
617                 return -1;
618         }
619
620         param[0].total_events = &atomic_total_events;
621         param[0].sched_type = sched_type;
622         param[0].port = 0;
623         param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
624         rte_wmb();
625
626         w_lcore = rte_get_next_lcore(
627                         /* start core */ -1,
628                         /* skip main */ 1,
629                         /* wrap */ 0);
630         rte_eal_remote_launch(main_thread, &param[0], w_lcore);
631
632         for (port = 1; port < nb_workers; port++) {
633                 param[port].total_events = &atomic_total_events;
634                 param[port].sched_type = sched_type;
635                 param[port].port = port;
636                 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
637                 rte_smp_wmb();
638                 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
639                 rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
640         }
641
642         rte_smp_wmb();
643         ret = wait_workers_to_join(&atomic_total_events);
644         free(param);
645
646         return ret;
647 }
648
649 /*
650  * Generate a prescribed number of events and spread them across available
651  * queues. Dequeue the events through multiple ports and verify the enqueued
652  * event attributes
653  */
654 static int
655 test_multi_queue_enq_multi_port_deq(void)
656 {
657         const unsigned int total_events = MAX_EVENTS;
658         uint32_t nr_ports;
659         int ret;
660
661         ret = generate_random_events(total_events);
662         if (ret)
663                 return -1;
664
665         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
666                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
667                                 "Port count get failed");
668         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
669
670         if (!nr_ports) {
671                 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
672                          rte_lcore_count() - 1);
673                 return 0;
674         }
675
676         return launch_workers_and_wait(worker_multi_port_fn,
677                                        worker_multi_port_fn, total_events,
678                                        nr_ports, 0xff /* invalid */);
679 }
680
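/* Device stop flush callback: count CPU events still held by the device. */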
681 static
682 void flush(uint8_t dev_id, struct rte_event event, void *arg)
683 {
684         unsigned int *count = arg;
685
686         RTE_SET_USED(dev_id);
687         if (event.event_type == RTE_EVENT_TYPE_CPU)
688                 *count = *count + 1;
689 }
690
691 static int
692 test_dev_stop_flush(void)
693 {
694         unsigned int total_events = MAX_EVENTS, count = 0;
695         int ret;
696
697         ret = generate_random_events(total_events);
698         if (ret)
699                 return -1;
700
701         ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
702         if (ret)
703                 return -2;
704         rte_event_dev_stop(evdev);
705         ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
706         if (ret)
707                 return -3;
708         RTE_TEST_ASSERT_EQUAL(total_events, count,
709                               "count mismatch total_events=%d count=%d",
710                               total_events, count);
711
712         return 0;
713 }
714
715 static int
716 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
717                                    struct rte_event *ev)
718 {
719         RTE_SET_USED(index);
720         RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
721                               "queue mismatch enq=%d deq =%d",
722                               port, ev->queue_id);
723
724         return 0;
725 }
726
727 /*
728  * Link queue x to port x and check the correctness of the link by verifying
729  * queue_id == x on dequeue from that specific port x
730  */
731 static int
732 test_queue_to_port_single_link(void)
733 {
734         int i, nr_links, ret;
735         uint32_t queue_count;
736         uint32_t port_count;
737
738         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
739                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
740                                 "Port count get failed");
741
742         /* Unlink all connections that were created in eventdev_setup */
743         for (i = 0; i < (int)port_count; i++) {
744                 ret = rte_event_port_unlink(evdev, i, NULL, 0);
745                 RTE_TEST_ASSERT(ret >= 0,
746                                 "Failed to unlink all queues port=%d", i);
747         }
748
749         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
750                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
751                                 "Queue count get failed");
752
753         nr_links = RTE_MIN(port_count, queue_count);
754         const unsigned int total_events = MAX_EVENTS / nr_links;
755
756         /* Link queue x to port x and inject events to queue x through port x */
757         for (i = 0; i < nr_links; i++) {
758                 uint8_t queue = (uint8_t)i;
759
760                 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
761                 RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
762
763                 ret = inject_events(0x100 /*flow_id */,
764                                     RTE_EVENT_TYPE_CPU /* event_type */,
765                                     rte_rand() % 256 /* sub_event_type */,
766                                     rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
767                                     queue /* queue */, i /* port */,
768                                     total_events /* events */);
769                 if (ret)
770                         return -1;
771         }
772
773         /* Verify the events generated from correct queue */
774         for (i = 0; i < nr_links; i++) {
775                 ret = consume_events(i /* port */, total_events,
776                                      validate_queue_to_port_single_link);
777                 if (ret)
778                         return -1;
779         }
780
781         return 0;
782 }
783
784 static int
785 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
786                                   struct rte_event *ev)
787 {
788         RTE_SET_USED(index);
789         RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
790                               "queue mismatch enq=%d deq =%d",
791                               port, ev->queue_id);
792
793         return 0;
794 }
795
796 /*
797  * Link all even-numbered queues to port 0 and all odd-numbered queues to
798  * port 1, then verify the links on dequeue
799  */
800 static int
801 test_queue_to_port_multi_link(void)
802 {
803         int ret, port0_events = 0, port1_events = 0;
804         uint32_t nr_queues = 0;
805         uint32_t nr_ports = 0;
806         uint8_t queue, port;
807
808         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
809                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),
810                                 "Queue count get failed");
814         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
815                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
816                                 "Port count get failed");
817
818         if (nr_ports < 2) {
819                 otx2_err("Not enough ports to test ports=%d", nr_ports);
820                 return 0;
821         }
822
823         /* Unlink all connections that were created in eventdev_setup */
824         for (port = 0; port < nr_ports; port++) {
825                 ret = rte_event_port_unlink(evdev, port, NULL, 0);
826                 RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
827                                 port);
828         }
829
830         const unsigned int total_events = MAX_EVENTS / nr_queues;
831
832         /* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
833         for (queue = 0; queue < nr_queues; queue++) {
834                 port = queue & 0x1;
835                 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
836                 RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
837                                 queue, port);
838
839                 ret = inject_events(0x100 /*flow_id */,
840                                     RTE_EVENT_TYPE_CPU /* event_type */,
841                                     rte_rand() % 256 /* sub_event_type */,
842                                     rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
843                                     queue /* queue */, port /* port */,
844                                     total_events /* events */);
845                 if (ret)
846                         return -1;
847
848                 if (port == 0)
849                         port0_events += total_events;
850                 else
851                         port1_events += total_events;
852         }
853
854         ret = consume_events(0 /* port */, port0_events,
855                              validate_queue_to_port_multi_link);
856         if (ret)
857                 return -1;
858         ret = consume_events(1 /* port */, port1_events,
859                              validate_queue_to_port_multi_link);
860         if (ret)
861                 return -1;
862
863         return 0;
864 }
865
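/*
 * Two-stage flow based pipeline: stage 0 events (sub_event_type 0) are
 * forwarded to stage 1 with the requested sched type; stage 1 events are
 * recorded in the sequence list and released.
 */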
866 static int
867 worker_flow_based_pipeline(void *arg)
868 {
869         struct test_core_param *param = arg;
870         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
871         rte_atomic32_t *total_events = param->total_events;
872         uint8_t new_sched_type = param->sched_type;
873         uint8_t port = param->port;
874         uint16_t valid_event;
875         struct rte_event ev;
876
877         while (rte_atomic32_read(total_events) > 0) {
878                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
879                                                       dequeue_tmo_ticks);
880                 if (!valid_event)
881                         continue;
882
883                 /* Events from stage 0 */
884                 if (ev.sub_event_type == 0) {
885                         /* Move to atomic flow to maintain the ordering */
886                         ev.flow_id = 0x2;
887                         ev.event_type = RTE_EVENT_TYPE_CPU;
888                         ev.sub_event_type = 1; /* stage 1 */
889                         ev.sched_type = new_sched_type;
890                         ev.op = RTE_EVENT_OP_FORWARD;
891                         rte_event_enqueue_burst(evdev, port, &ev, 1);
892         } else if (ev.sub_event_type == 1) { /* Events from stage 1 */
893                         uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
894
895                         if (seqn_list_update(seqn) == 0) {
896                                 rte_pktmbuf_free(ev.mbuf);
897                                 rte_atomic32_sub(total_events, 1);
898                         } else {
899                                 otx2_err("Failed to update seqn_list");
900                                 return -1;
901                         }
902                 } else {
903                         otx2_err("Invalid ev.sub_event_type = %d",
904                                  ev.sub_event_type);
905                         return -1;
906                 }
907         }
908         return 0;
909 }
910
911 static int
912 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
913                                     uint8_t out_sched_type)
914 {
915         const unsigned int total_events = MAX_EVENTS;
916         uint32_t nr_ports;
917         int ret;
918
919         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
920                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
921                                 "Port count get failed");
922         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
923
924         if (!nr_ports) {
925                 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
926                          rte_lcore_count() - 1);
927                 return 0;
928         }
929
930         /* Inject events with sequence numbers 0 .. total_events - 1 */
931         ret = inject_events(0x1 /*flow_id */,
932                             RTE_EVENT_TYPE_CPU /* event_type */,
933                             0 /* sub_event_type (stage 0) */,
934                             in_sched_type,
935                             0 /* queue */,
936                             0 /* port */,
937                             total_events /* events */);
938         if (ret)
939                 return -1;
940
941         rte_mb();
942         ret = launch_workers_and_wait(worker_flow_based_pipeline,
943                                       worker_flow_based_pipeline, total_events,
944                                       nr_ports, out_sched_type);
945         if (ret)
946                 return -1;
947
948         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
949             out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
950                 /* Check whether the event order was maintained */
951                 return seqn_list_check(total_events);
952         }
953
954         return 0;
955 }
956
957 /* Multi port ordered to atomic transaction */
958 static int
959 test_multi_port_flow_ordered_to_atomic(void)
960 {
961         /* Ingress event order test */
962         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
963                                                    RTE_SCHED_TYPE_ATOMIC);
964 }
965
966 static int
967 test_multi_port_flow_ordered_to_ordered(void)
968 {
969         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
970                                                    RTE_SCHED_TYPE_ORDERED);
971 }
972
973 static int
974 test_multi_port_flow_ordered_to_parallel(void)
975 {
976         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
977                                                    RTE_SCHED_TYPE_PARALLEL);
978 }
979
980 static int
981 test_multi_port_flow_atomic_to_atomic(void)
982 {
983         /* Ingress event order test */
984         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
985                                                    RTE_SCHED_TYPE_ATOMIC);
986 }
987
988 static int
989 test_multi_port_flow_atomic_to_ordered(void)
990 {
991         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
992                                                    RTE_SCHED_TYPE_ORDERED);
993 }
994
995 static int
996 test_multi_port_flow_atomic_to_parallel(void)
997 {
998         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
999                                                    RTE_SCHED_TYPE_PARALLEL);
1000 }
1001
1002 static int
1003 test_multi_port_flow_parallel_to_atomic(void)
1004 {
1005         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1006                                                    RTE_SCHED_TYPE_ATOMIC);
1007 }
1008
1009 static int
1010 test_multi_port_flow_parallel_to_ordered(void)
1011 {
1012         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1013                                                    RTE_SCHED_TYPE_ORDERED);
1014 }
1015
1016 static int
1017 test_multi_port_flow_parallel_to_parallel(void)
1018 {
1019         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1020                                                    RTE_SCHED_TYPE_PARALLEL);
1021 }
1022
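/*
 * Two-stage queue (group) based pipeline: events from queue 0 are forwarded
 * to queue 1 with the requested sched type; events from queue 1 are recorded
 * in the sequence list and released.
 */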
1023 static int
1024 worker_group_based_pipeline(void *arg)
1025 {
1026         struct test_core_param *param = arg;
1027         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
1028         rte_atomic32_t *total_events = param->total_events;
1029         uint8_t new_sched_type = param->sched_type;
1030         uint8_t port = param->port;
1031         uint16_t valid_event;
1032         struct rte_event ev;
1033
1034         while (rte_atomic32_read(total_events) > 0) {
1035                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
1036                                                       dequeue_tmo_ticks);
1037                 if (!valid_event)
1038                         continue;
1039
1040                 /* Events from stage 0 (group 0) */
1041                 if (ev.queue_id == 0) {
1042                         /* Move to atomic flow to maintain the ordering */
1043                         ev.flow_id = 0x2;
1044                         ev.event_type = RTE_EVENT_TYPE_CPU;
1045                         ev.sched_type = new_sched_type;
1046                         ev.queue_id = 1; /* Stage 1*/
1047                         ev.op = RTE_EVENT_OP_FORWARD;
1048                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1049                 } else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
1050                         uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
1051
1052                         if (seqn_list_update(seqn) == 0) {
1053                                 rte_pktmbuf_free(ev.mbuf);
1054                                 rte_atomic32_sub(total_events, 1);
1055                         } else {
1056                                 otx2_err("Failed to update seqn_list");
1057                                 return -1;
1058                         }
1059                 } else {
1060                         otx2_err("Invalid ev.queue_id = %d", ev.queue_id);
1061                         return -1;
1062                 }
1063         }
1064
1065         return 0;
1066 }
1067
1068 static int
1069 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
1070                                      uint8_t out_sched_type)
1071 {
1072         const unsigned int total_events = MAX_EVENTS;
1073         uint32_t queue_count;
1074         uint32_t nr_ports;
1075         int ret;
1076
1077         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1078                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1079                                 "Port count get failed");
1080
1081         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1082
1083         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1084                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1085                                 "Queue count get failed");
1086         if (queue_count < 2 ||  !nr_ports) {
1087                 otx2_err("Not enough queues=%d ports=%d or workers=%d",
1088                          queue_count, nr_ports,
1089                          rte_lcore_count() - 1);
1090                 return 0;
1091         }
1092
1093         /* Inject events with sequence numbers 0 .. total_events - 1 */
1094         ret = inject_events(0x1 /*flow_id */,
1095                             RTE_EVENT_TYPE_CPU /* event_type */,
1096                             0 /* sub_event_type (stage 0) */,
1097                             in_sched_type,
1098                             0 /* queue */,
1099                             0 /* port */,
1100                             total_events /* events */);
1101         if (ret)
1102                 return -1;
1103
1104         ret = launch_workers_and_wait(worker_group_based_pipeline,
1105                                       worker_group_based_pipeline, total_events,
1106                                       nr_ports, out_sched_type);
1107         if (ret)
1108                 return -1;
1109
1110         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1111             out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
1112                 /* Check whether the event order was maintained */
1113                 return seqn_list_check(total_events);
1114         }
1115
1116         return 0;
1117 }
1118
1119 static int
1120 test_multi_port_queue_ordered_to_atomic(void)
1121 {
1122         /* Ingress event order test */
1123         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1124                                                     RTE_SCHED_TYPE_ATOMIC);
1125 }
1126
1127 static int
1128 test_multi_port_queue_ordered_to_ordered(void)
1129 {
1130         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1131                                                     RTE_SCHED_TYPE_ORDERED);
1132 }
1133
1134 static int
1135 test_multi_port_queue_ordered_to_parallel(void)
1136 {
1137         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1138                                                     RTE_SCHED_TYPE_PARALLEL);
1139 }
1140
1141 static int
1142 test_multi_port_queue_atomic_to_atomic(void)
1143 {
1144         /* Ingress event order test */
1145         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1146                                                     RTE_SCHED_TYPE_ATOMIC);
1147 }
1148
1149 static int
1150 test_multi_port_queue_atomic_to_ordered(void)
1151 {
1152         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1153                                                     RTE_SCHED_TYPE_ORDERED);
1154 }
1155
1156 static int
1157 test_multi_port_queue_atomic_to_parallel(void)
1158 {
1159         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1160                                                     RTE_SCHED_TYPE_PARALLEL);
1161 }
1162
1163 static int
1164 test_multi_port_queue_parallel_to_atomic(void)
1165 {
1166         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1167                                                     RTE_SCHED_TYPE_ATOMIC);
1168 }
1169
1170 static int
1171 test_multi_port_queue_parallel_to_ordered(void)
1172 {
1173         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1174                                                     RTE_SCHED_TYPE_ORDERED);
1175 }
1176
1177 static int
1178 test_multi_port_queue_parallel_to_parallel(void)
1179 {
1180         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1181                                                     RTE_SCHED_TYPE_PARALLEL);
1182 }
1183
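/*
 * Flow based pipeline with 256 stages tracked in sub_event_type: each hop
 * forwards with a random sched type and the final stage frees the event.
 */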
1184 static int
1185 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1186 {
1187         struct test_core_param *param = arg;
1188         rte_atomic32_t *total_events = param->total_events;
1189         uint8_t port = param->port;
1190         uint16_t valid_event;
1191         struct rte_event ev;
1192
1193         while (rte_atomic32_read(total_events) > 0) {
1194                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1195                 if (!valid_event)
1196                         continue;
1197
1198                 if (ev.sub_event_type == 255) { /* last stage */
1199                         rte_pktmbuf_free(ev.mbuf);
1200                         rte_atomic32_sub(total_events, 1);
1201                 } else {
1202                         ev.event_type = RTE_EVENT_TYPE_CPU;
1203                         ev.sub_event_type++;
1204                         ev.sched_type =
1205                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1206                         ev.op = RTE_EVENT_OP_FORWARD;
1207                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1208                 }
1209         }
1210
1211         return 0;
1212 }
1213
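/*
 * Inject MAX_EVENTS stage-0 events into queue 0 and run the given
 * max-stages worker on all usable worker ports.
 */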
1214 static int
1215 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1216 {
1217         uint32_t nr_ports;
1218         int ret;
1219
1220         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1221                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1222                                 "Port count get failed");
1223         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1224
1225         if (!nr_ports) {
1226                 otx2_err("Not enough ports=%d or workers=%d",
1227                          nr_ports, rte_lcore_count() - 1);
1228                 return 0;
1229         }
1230
1231         /* Inject events with sequence numbers 0 .. MAX_EVENTS - 1 */
1232         ret = inject_events(0x1 /*flow_id */,
1233                             RTE_EVENT_TYPE_CPU /* event_type */,
1234                             0 /* sub_event_type (stage 0) */,
1235                             rte_rand() %
1236                                 (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1237                             0 /* queue */,
1238                             0 /* port */,
1239                             MAX_EVENTS /* events */);
1240         if (ret)
1241                 return -1;
1242
1243         return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1244                                        0xff /* invalid */);
1245 }
1246
1247 /* Flow based pipeline with maximum stages with random sched type */
1248 static int
1249 test_multi_port_flow_max_stages_random_sched_type(void)
1250 {
1251         return launch_multi_port_max_stages_random_sched_type(
1252                 worker_flow_based_pipeline_max_stages_rand_sched_type);
1253 }
1254
1255 static int
1256 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1257 {
1258         struct test_core_param *param = arg;
1259         uint8_t port = param->port;
1260         uint32_t queue_count;
1261         uint16_t valid_event;
1262         struct rte_event ev;
1263
1264         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1265                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1266                                 "Queue count get failed");
1267         uint8_t nr_queues = queue_count;
1268         rte_atomic32_t *total_events = param->total_events;
1269
1270         while (rte_atomic32_read(total_events) > 0) {
1271                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1272                 if (!valid_event)
1273                         continue;
1274
1275                 if (ev.queue_id == nr_queues - 1) { /* last stage */
1276                         rte_pktmbuf_free(ev.mbuf);
1277                         rte_atomic32_sub(total_events, 1);
1278                 } else {
1279                         ev.event_type = RTE_EVENT_TYPE_CPU;
1280                         ev.queue_id++;
1281                         ev.sched_type =
1282                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1283                         ev.op = RTE_EVENT_OP_FORWARD;
1284                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1285                 }
1286         }
1287
1288         return 0;
1289 }
1290
1291 /* Queue based pipeline with maximum stages with random sched type */
1292 static int
1293 test_multi_port_queue_max_stages_random_sched_type(void)
1294 {
1295         return launch_multi_port_max_stages_random_sched_type(
1296                 worker_queue_based_pipeline_max_stages_rand_sched_type);
1297 }
1298
1299 static int
1300 worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
1301 {
1302         struct test_core_param *param = arg;
1303         uint8_t port = param->port;
1304         uint32_t queue_count;
1305         uint16_t valid_event;
1306         struct rte_event ev;
1307
1308         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1309                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1310                                 "Queue count get failed");
1311         uint8_t nr_queues = queue_count;
1312         rte_atomic32_t *total_events = param->total_events;
1313
1314         while (rte_atomic32_read(total_events) > 0) {
1315                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1316                 if (!valid_event)
1317                         continue;
1318
1319                 if (ev.queue_id == nr_queues - 1) { /* Last stage */
1320                         rte_pktmbuf_free(ev.mbuf);
1321                         rte_atomic32_sub(total_events, 1);
1322                 } else {
1323                         ev.event_type = RTE_EVENT_TYPE_CPU;
1324                         ev.queue_id++;
1325                         ev.sub_event_type = rte_rand() % 256;
1326                         ev.sched_type =
1327                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1328                         ev.op = RTE_EVENT_OP_FORWARD;
1329                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1330                 }
1331         }
1332
1333         return 0;
1334 }
1335
1336 /* Queue and flow based pipeline with maximum stages with random sched type */
1337 static int
1338 test_multi_port_mixed_max_stages_random_sched_type(void)
1339 {
1340         return launch_multi_port_max_stages_random_sched_type(
1341                 worker_mixed_pipeline_max_stages_rand_sched_type);
1342 }
1343
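/*
 * Producer: enqueue NUM_PACKETS ordered NEW events on a single flow with
 * increasing sequence numbers through the worker's port.
 */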
1344 static int
1345 worker_ordered_flow_producer(void *arg)
1346 {
1347         struct test_core_param *param = arg;
1348         uint8_t port = param->port;
1349         struct rte_mbuf *m;
1350         int counter = 0;
1351
1352         while (counter < NUM_PACKETS) {
1353                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
1354                 if (m == NULL)
1355                         continue;
1356
1357                 *rte_event_pmd_selftest_seqn(m) = counter++;
1358
1359                 struct rte_event ev = {.event = 0, .u64 = 0};
1360
1361                 ev.flow_id = 0x1; /* Generate a fat flow */
1362                 ev.sub_event_type = 0;
1363                 /* Inject the new event */
1364                 ev.op = RTE_EVENT_OP_NEW;
1365                 ev.event_type = RTE_EVENT_TYPE_CPU;
1366                 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
1367                 ev.queue_id = 0;
1368                 ev.mbuf = m;
1369                 rte_event_enqueue_burst(evdev, port, &ev, 1);
1370         }
1371
1372         return 0;
1373 }
1374
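/*
 * Run the ordered-flow producer on one worker lcore and 'fn' as the consumer
 * pipeline on the rest, then verify that ingress order was preserved.
 */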
1375 static inline int
1376 test_producer_consumer_ingress_order_test(int (*fn)(void *))
1377 {
1378         uint32_t nr_ports;
1379
1380         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1381                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1382                                 "Port count get failed");
1383         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1384
1385         if (rte_lcore_count() < 3 || nr_ports < 2) {
1386                 otx2_err("### Not enough cores for test.");
1387                 return 0;
1388         }
1389
1390         launch_workers_and_wait(worker_ordered_flow_producer, fn,
1391                                 NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
1392         /* Check whether the event order was maintained */
1393         return seqn_list_check(NUM_PACKETS);
1394 }
1395
1396 /* Flow based producer consumer ingress order test */
1397 static int
1398 test_flow_producer_consumer_ingress_order_test(void)
1399 {
1400         return test_producer_consumer_ingress_order_test(
1401                                 worker_flow_based_pipeline);
1402 }
1403
1404 /* Queue based producer consumer ingress order test */
1405 static int
1406 test_queue_producer_consumer_ingress_order_test(void)
1407 {
1408         return test_producer_consumer_ingress_order_test(
1409                                 worker_group_based_pipeline);
1410 }
1411
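/* Run one test case with its setup/teardown hooks and update the counters. */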
1412 static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
1413                               int (*test)(void), const char *name)
1414 {
1415         if (setup() < 0) {
1416                 printf("Error setting up test %s\n", name);
1417                 unsupported++;
1418         } else {
1419                 if (test() < 0) {
1420                         failed++;
1421                         printf("+ TestCase [%2d] : %s failed\n", total, name);
1422                 } else {
1423                         passed++;
1424                         printf("+ TestCase [%2d] : %s succeeded\n", total,
1425                                name);
1426                 }
1427         }
1428
1429         total++;
1430         tdown();
1431 }
1432
1433 int
1434 otx2_sso_selftest(void)
1435 {
1436         testsuite_setup();
1437
1438         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1439                            test_simple_enqdeq_ordered);
1440         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1441                            test_simple_enqdeq_atomic);
1442         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1443                            test_simple_enqdeq_parallel);
1444         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1445                            test_multi_queue_enq_single_port_deq);
1446         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1447                            test_dev_stop_flush);
1448         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1449                            test_multi_queue_enq_multi_port_deq);
1450         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1451                            test_queue_to_port_single_link);
1452         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1453                            test_queue_to_port_multi_link);
1454         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1455                            test_multi_port_flow_ordered_to_atomic);
1456         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1457                            test_multi_port_flow_ordered_to_ordered);
1458         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1459                            test_multi_port_flow_ordered_to_parallel);
1460         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1461                            test_multi_port_flow_atomic_to_atomic);
1462         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1463                            test_multi_port_flow_atomic_to_ordered);
1464         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1465                            test_multi_port_flow_atomic_to_parallel);
1466         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1467                            test_multi_port_flow_parallel_to_atomic);
1468         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1469                            test_multi_port_flow_parallel_to_ordered);
1470         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1471                            test_multi_port_flow_parallel_to_parallel);
1472         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1473                            test_multi_port_queue_ordered_to_atomic);
1474         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1475                            test_multi_port_queue_ordered_to_ordered);
1476         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1477                            test_multi_port_queue_ordered_to_parallel);
1478         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1479                            test_multi_port_queue_atomic_to_atomic);
1480         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1481                            test_multi_port_queue_atomic_to_ordered);
1482         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1483                            test_multi_port_queue_atomic_to_parallel);
1484         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1485                            test_multi_port_queue_parallel_to_atomic);
1486         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1487                            test_multi_port_queue_parallel_to_ordered);
1488         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1489                            test_multi_port_queue_parallel_to_parallel);
1490         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1491                            test_multi_port_flow_max_stages_random_sched_type);
1492         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1493                            test_multi_port_queue_max_stages_random_sched_type);
1494         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1495                            test_multi_port_mixed_max_stages_random_sched_type);
1496         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1497                            test_flow_producer_consumer_ingress_order_test);
1498         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1499                            test_queue_producer_consumer_ingress_order_test);
1500         OCTEONTX2_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
1501                            test_multi_queue_priority);
1502         OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1503                            test_multi_port_flow_ordered_to_atomic);
1504         OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1505                            test_multi_port_queue_ordered_to_atomic);
1506         printf("Total tests   : %d\n", total);
1507         printf("Passed        : %d\n", passed);
1508         printf("Failed        : %d\n", failed);
1509         printf("Not supported : %d\n", unsupported);
1510
1511         testsuite_teardown();
1512
1513         if (failed)
1514                 return -1;
1515
1516         return 0;
1517 }