1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include <rte_atomic.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_debug.h>
9 #include <rte_eal.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_hexdump.h>
13 #include <rte_launch.h>
14 #include <rte_lcore.h>
15 #include <rte_mbuf.h>
16 #include <rte_malloc.h>
17 #include <rte_memcpy.h>
18 #include <rte_per_lcore.h>
19 #include <rte_random.h>
20 #include <rte_test.h>
21
22 #include "otx2_evdev.h"
23
24 #define NUM_PACKETS (1024)
25 #define MAX_EVENTS  (1024)
26
27 #define OCTEONTX2_TEST_RUN(setup, teardown, test) \
28         octeontx_test_run(setup, teardown, test, #test)
29
30 static int total;
31 static int passed;
32 static int failed;
33 static int unsupported;
34
35 static int evdev;
36 static struct rte_mempool *eventdev_test_mempool;
37
38 struct event_attr {
39         uint32_t flow_id;
40         uint8_t event_type;
41         uint8_t sub_event_type;
42         uint8_t sched_type;
43         uint8_t queue;
44         uint8_t port;
45 };
46
47 static uint32_t seqn_list_index;
48 static int seqn_list[NUM_PACKETS];
49
50 static inline void
51 seqn_list_init(void)
52 {
53         RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
54         memset(seqn_list, 0, sizeof(seqn_list));
55         seqn_list_index = 0;
56 }
57
58 static inline int
59 seqn_list_update(int val)
60 {
61         if (seqn_list_index >= NUM_PACKETS)
62                 return -1;
63
64         seqn_list[seqn_list_index++] = val;
65         rte_smp_wmb();
66         return 0;
67 }
68
69 static inline int
70 seqn_list_check(int limit)
71 {
72         int i;
73
74         for (i = 0; i < limit; i++) {
75                 if (seqn_list[i] != i) {
76                         otx2_err("Seqn mismatch %d %d", seqn_list[i], i);
77                         return -1;
78                 }
79         }
80         return 0;
81 }
82
83 struct test_core_param {
84         rte_atomic32_t *total_events;
85         uint64_t dequeue_tmo_ticks;
86         uint8_t port;
87         uint8_t sched_type;
88 };
89
90 static int
91 testsuite_setup(void)
92 {
93         const char *eventdev_name = "event_octeontx2";
94
95         evdev = rte_event_dev_get_dev_id(eventdev_name);
96         if (evdev < 0) {
97                 otx2_err("%d: Eventdev %s not found", __LINE__, eventdev_name);
98                 return -1;
99         }
100         return 0;
101 }
102
103 static void
104 testsuite_teardown(void)
105 {
106         rte_event_dev_close(evdev);
107 }
108
109 static inline void
110 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
111                                 struct rte_event_dev_info *info)
112 {
113         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
114         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
115         dev_conf->nb_event_ports = info->max_event_ports;
116         dev_conf->nb_event_queues = info->max_event_queues;
117         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
118         dev_conf->nb_event_port_dequeue_depth =
119                         info->max_event_port_dequeue_depth;
120         dev_conf->nb_event_port_enqueue_depth =
121                         info->max_event_port_enqueue_depth;
122         dev_conf->nb_event_port_enqueue_depth =
123                         info->max_event_port_enqueue_depth;
124         dev_conf->nb_events_limit =
125                         info->max_num_events;
126 }
127
128 enum {
129         TEST_EVENTDEV_SETUP_DEFAULT,
130         TEST_EVENTDEV_SETUP_PRIORITY,
131         TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
132 };
133
134 static inline int
135 _eventdev_setup(int mode)
136 {
137         const char *pool_name = "evdev_octeontx_test_pool";
138         struct rte_event_dev_config dev_conf;
139         struct rte_event_dev_info info;
140         int i, ret;
141
142         /* Create and destroy pool for each test case to make it standalone */
143         eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
144                                                         0, 0, 512,
145                                                         rte_socket_id());
146         if (!eventdev_test_mempool) {
147                 otx2_err("ERROR creating mempool");
148                 return -1;
149         }
150
151         ret = rte_event_dev_info_get(evdev, &info);
152         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
153
154         devconf_set_default_sane_values(&dev_conf, &info);
155         if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
156                 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
157
158         ret = rte_event_dev_configure(evdev, &dev_conf);
159         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
160
161         uint32_t queue_count;
162         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
163                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
164                                 "Queue count get failed");
165
166         if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
167                 if (queue_count > 8)
168                         queue_count = 8;
169
170                 /* Configure event queues(0 to n) with
171                  * RTE_EVENT_DEV_PRIORITY_HIGHEST to
172                  * RTE_EVENT_DEV_PRIORITY_LOWEST
173                  */
174                 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
175                                 queue_count;
176                 for (i = 0; i < (int)queue_count; i++) {
177                         struct rte_event_queue_conf queue_conf;
178
179                         ret = rte_event_queue_default_conf_get(evdev, i,
180                                                                &queue_conf);
181                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
182                                                 i);
183                         queue_conf.priority = i * step;
184                         ret = rte_event_queue_setup(evdev, i, &queue_conf);
185                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
186                                                 i);
187                 }
188
189         } else {
190                 /* Configure event queues with default priority */
191                 for (i = 0; i < (int)queue_count; i++) {
192                         ret = rte_event_queue_setup(evdev, i, NULL);
193                         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
194                                                 i);
195                 }
196         }
197         /* Configure event ports */
198         uint32_t port_count;
199         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
200                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
201                                 "Port count get failed");
202         for (i = 0; i < (int)port_count; i++) {
203                 ret = rte_event_port_setup(evdev, i, NULL);
204                 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
205                 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
206                 RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
207                                 i);
208         }
209
210         ret = rte_event_dev_start(evdev);
211         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
212
213         return 0;
214 }
215
216 static inline int
217 eventdev_setup(void)
218 {
219         return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
220 }
221
222 static inline int
223 eventdev_setup_priority(void)
224 {
225         return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
226 }
227
228 static inline int
229 eventdev_setup_dequeue_timeout(void)
230 {
231         return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
232 }
233
234 static inline void
235 eventdev_teardown(void)
236 {
237         rte_event_dev_stop(evdev);
238         rte_mempool_free(eventdev_test_mempool);
239 }
240
241 static inline void
242 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
243                                  uint32_t flow_id, uint8_t event_type,
244                                  uint8_t sub_event_type, uint8_t sched_type,
245                                  uint8_t queue, uint8_t port)
246 {
247         struct event_attr *attr;
248
249         /* Store the event attributes in mbuf for future reference */
250         attr = rte_pktmbuf_mtod(m, struct event_attr *);
251         attr->flow_id = flow_id;
252         attr->event_type = event_type;
253         attr->sub_event_type = sub_event_type;
254         attr->sched_type = sched_type;
255         attr->queue = queue;
256         attr->port = port;
257
258         ev->flow_id = flow_id;
259         ev->sub_event_type = sub_event_type;
260         ev->event_type = event_type;
261         /* Inject the new event */
262         ev->op = RTE_EVENT_OP_NEW;
263         ev->sched_type = sched_type;
264         ev->queue_id = queue;
265         ev->mbuf = m;
266 }
267
268 static inline int
269 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
270               uint8_t sched_type, uint8_t queue, uint8_t port,
271               unsigned int events)
272 {
273         struct rte_mbuf *m;
274         unsigned int i;
275
276         for (i = 0; i < events; i++) {
277                 struct rte_event ev = {.event = 0, .u64 = 0};
278
279                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
280                 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
281
282                 m->seqn = i;
283                 update_event_and_validation_attr(m, &ev, flow_id, event_type,
284                                                  sub_event_type, sched_type,
285                                                  queue, port);
286                 rte_event_enqueue_burst(evdev, port, &ev, 1);
287         }
288         return 0;
289 }
290
291 static inline int
292 check_excess_events(uint8_t port)
293 {
294         uint16_t valid_event;
295         struct rte_event ev;
296         int i;
297
298         /* Check for excess events, try a few times and exit */
299         for (i = 0; i < 32; i++) {
300                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
301
302                 RTE_TEST_ASSERT_SUCCESS(valid_event,
303                                         "Unexpected valid event=%d",
304                                         ev.mbuf->seqn);
305         }
306         return 0;
307 }
308
309 static inline int
310 generate_random_events(const unsigned int total_events)
311 {
312         struct rte_event_dev_info info;
313         uint32_t queue_count;
314         unsigned int i;
315         int ret;
316
317         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
318                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
319                                 "Queue count get failed");
320
321         ret = rte_event_dev_info_get(evdev, &info);
322         RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
323         for (i = 0; i < total_events; i++) {
324                 ret = inject_events(
325                         rte_rand() % info.max_event_queue_flows /*flow_id */,
326                         RTE_EVENT_TYPE_CPU /* event_type */,
327                         rte_rand() % 256 /* sub_event_type */,
328                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
329                         rte_rand() % queue_count /* queue */,
330                         0 /* port */,
331                         1 /* events */);
332                 if (ret)
333                         return -1;
334         }
335         return ret;
336 }
337
338
339 static inline int
340 validate_event(struct rte_event *ev)
341 {
342         struct event_attr *attr;
343
344         attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
345         RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
346                               "flow_id mismatch enq=%d deq =%d",
347                               attr->flow_id, ev->flow_id);
348         RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
349                               "event_type mismatch enq=%d deq =%d",
350                               attr->event_type, ev->event_type);
351         RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
352                               "sub_event_type mismatch enq=%d deq =%d",
353                               attr->sub_event_type, ev->sub_event_type);
354         RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
355                               "sched_type mismatch enq=%d deq =%d",
356                               attr->sched_type, ev->sched_type);
357         RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
358                               "queue mismatch enq=%d deq =%d",
359                               attr->queue, ev->queue_id);
360         return 0;
361 }
362
363 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
364                                  struct rte_event *ev);
365
366 static inline int
367 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
368 {
369         uint32_t events = 0, forward_progress_cnt = 0, index = 0;
370         uint16_t valid_event;
371         struct rte_event ev;
372         int ret;
373
374         while (1) {
375                 if (++forward_progress_cnt > UINT16_MAX) {
376                         otx2_err("Detected deadlock");
377                         return -1;
378                 }
379
380                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
381                 if (!valid_event)
382                         continue;
383
384                 forward_progress_cnt = 0;
385                 ret = validate_event(&ev);
386                 if (ret)
387                         return -1;
388
389                 if (fn != NULL) {
390                         ret = fn(index, port, &ev);
391                         RTE_TEST_ASSERT_SUCCESS(ret,
392                                 "Failed to validate test specific event");
393                 }
394
395                 ++index;
396
397                 rte_pktmbuf_free(ev.mbuf);
398                 if (++events >= total_events)
399                         break;
400         }
401
402         return check_excess_events(port);
403 }
404
405 static int
406 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
407 {
408         RTE_SET_USED(port);
409         RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
410                               index, ev->mbuf->seqn);
411         return 0;
412 }
413
414 static inline int
415 test_simple_enqdeq(uint8_t sched_type)
416 {
417         int ret;
418
419         ret = inject_events(0 /*flow_id */,
420                             RTE_EVENT_TYPE_CPU /* event_type */,
421                             0 /* sub_event_type */,
422                             sched_type,
423                             0 /* queue */,
424                             0 /* port */,
425                             MAX_EVENTS);
426         if (ret)
427                 return -1;
428
429         return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
430 }
431
432 static int
433 test_simple_enqdeq_ordered(void)
434 {
435         return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
436 }
437
438 static int
439 test_simple_enqdeq_atomic(void)
440 {
441         return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
442 }
443
444 static int
445 test_simple_enqdeq_parallel(void)
446 {
447         return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
448 }
449
450 /*
451  * Generate a prescribed number of events and spread them across available
452  * queues. On dequeue, using a single event port (port 0), verify the enqueued
453  * event attributes.
454  */
455 static int
456 test_multi_queue_enq_single_port_deq(void)
457 {
458         int ret;
459
460         ret = generate_random_events(MAX_EVENTS);
461         if (ret)
462                 return -1;
463
464         return consume_events(0 /* port */, MAX_EVENTS, NULL);
465 }
466
467 /*
468  * Inject 0..MAX_EVENTS events over queues 0..queue_count-1 using a modulo
469  * operation.
470  *
471  * For example, inject 32 events over queues 0..7:
472  * enqueue events 0, 8, 16, 24 in queue 0
473  * enqueue events 1, 9, 17, 25 in queue 1
474  * ..
475  * ..
476  * enqueue events 7, 15, 23, 31 in queue 7
477  *
478  * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25..,7,15,23,31
479  * order from queue 0 (highest priority) to queue 7 (lowest priority)
480  */
481 static int
482 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
483 {
484         uint32_t queue_count;
485
486         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
487                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
488                                 "Queue count get failed");
489         if (queue_count > 8)
490                 queue_count = 8;
491         uint32_t range = MAX_EVENTS / queue_count;
492         uint32_t expected_val = (index % range) * queue_count;
493
494         expected_val += ev->queue_id;
495         RTE_SET_USED(port);
496         RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
497         "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
498                               ev->mbuf->seqn, index, expected_val, range,
499                               queue_count, MAX_EVENTS);
500         return 0;
501 }
502
503 static int
504 test_multi_queue_priority(void)
505 {
506         int i, max_evts_roundoff;
507         /* See validate_queue_priority() comments for the priority validation logic */
508         uint32_t queue_count;
509         struct rte_mbuf *m;
510         uint8_t queue;
511
512         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
513                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
514                                 "Queue count get failed");
515         if (queue_count > 8)
516                 queue_count = 8;
517         max_evts_roundoff  = MAX_EVENTS / queue_count;
518         max_evts_roundoff *= queue_count;
519
520         for (i = 0; i < max_evts_roundoff; i++) {
521                 struct rte_event ev = {.event = 0, .u64 = 0};
522
523                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
524                 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
525
526                 m->seqn = i;
527                 queue = i % queue_count;
528                 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
529                                                  0, RTE_SCHED_TYPE_PARALLEL,
530                                                  queue, 0);
531                 rte_event_enqueue_burst(evdev, 0, &ev, 1);
532         }
533
534         return consume_events(0, max_evts_roundoff, validate_queue_priority);
535 }
536
537 static int
538 worker_multi_port_fn(void *arg)
539 {
540         struct test_core_param *param = arg;
541         rte_atomic32_t *total_events = param->total_events;
542         uint8_t port = param->port;
543         uint16_t valid_event;
544         struct rte_event ev;
545         int ret;
546
547         while (rte_atomic32_read(total_events) > 0) {
548                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
549                 if (!valid_event)
550                         continue;
551
552                 ret = validate_event(&ev);
553                 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
554                 rte_pktmbuf_free(ev.mbuf);
555                 rte_atomic32_sub(total_events, 1);
556         }
557
558         return 0;
559 }
560
561 static inline int
562 wait_workers_to_join(const rte_atomic32_t *count)
563 {
564         uint64_t cycles, print_cycles;
565
566         cycles = rte_get_timer_cycles();
567         print_cycles = cycles;
568         while (rte_atomic32_read(count)) {
569                 uint64_t new_cycles = rte_get_timer_cycles();
570
571                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
572                         otx2_err("Events %d", rte_atomic32_read(count));
573                         print_cycles = new_cycles;
574                 }
575                 if (new_cycles - cycles > rte_get_timer_hz() * 10) {
576                         otx2_err("No schedules for 10 seconds, deadlock (%d)",
577                                  rte_atomic32_read(count));
578                         rte_event_dev_dump(evdev, stdout);
579                         cycles = new_cycles;
580                         return -1;
581                 }
582         }
583         rte_eal_mp_wait_lcore();
584
585         return 0;
586 }
587
588 static inline int
589 launch_workers_and_wait(int (*master_worker)(void *),
590                         int (*slave_workers)(void *), uint32_t total_events,
591                         uint8_t nb_workers, uint8_t sched_type)
592 {
593         rte_atomic32_t atomic_total_events;
594         struct test_core_param *param;
595         uint64_t dequeue_tmo_ticks;
596         uint8_t port = 0;
597         int w_lcore;
598         int ret;
599
600         if (!nb_workers)
601                 return 0;
602
603         rte_atomic32_set(&atomic_total_events, total_events);
604         seqn_list_init();
605
606         param = malloc(sizeof(struct test_core_param) * nb_workers);
607         if (!param)
608                 return -1;
609
610         ret = rte_event_dequeue_timeout_ticks(evdev,
611                                               rte_rand() % 10000000/* 10ms */,
612                                               &dequeue_tmo_ticks);
613         if (ret) {
614                 free(param);
615                 return -1;
616         }
617
618         param[0].total_events = &atomic_total_events;
619         param[0].sched_type = sched_type;
620         param[0].port = 0;
621         param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
622         rte_wmb();
623
624         w_lcore = rte_get_next_lcore(
625                         /* start core */ -1,
626                         /* skip master */ 1,
627                         /* wrap */ 0);
628         rte_eal_remote_launch(master_worker, &param[0], w_lcore);
629
630         for (port = 1; port < nb_workers; port++) {
631                 param[port].total_events = &atomic_total_events;
632                 param[port].sched_type = sched_type;
633                 param[port].port = port;
634                 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
635                 rte_smp_wmb();
636                 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
637                 rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
638         }
639
640         rte_smp_wmb();
641         ret = wait_workers_to_join(&atomic_total_events);
642         free(param);
643
644         return ret;
645 }
646
647 /*
648  * Generate a prescribed number of events and spread them across available
649  * queues. Dequeue the events through multiple ports and verify the enqueued
650  * event attributes
651  */
652 static int
653 test_multi_queue_enq_multi_port_deq(void)
654 {
655         const unsigned int total_events = MAX_EVENTS;
656         uint32_t nr_ports;
657         int ret;
658
659         ret = generate_random_events(total_events);
660         if (ret)
661                 return -1;
662
663         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
664                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
665                                 "Port count get failed");
666         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
667
668         if (!nr_ports) {
669                 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
670                          rte_lcore_count() - 1);
671                 return 0;
672         }
673
674         return launch_workers_and_wait(worker_multi_port_fn,
675                                        worker_multi_port_fn, total_events,
676                                        nr_ports, 0xff /* invalid */);
677 }
678
679 static
680 void flush(uint8_t dev_id, struct rte_event event, void *arg)
681 {
682         unsigned int *count = arg;
683
684         RTE_SET_USED(dev_id);
685         if (event.event_type == RTE_EVENT_TYPE_CPU)
686                 *count = *count + 1;
687 }
688
689 static int
690 test_dev_stop_flush(void)
691 {
692         unsigned int total_events = MAX_EVENTS, count = 0;
693         int ret;
694
695         ret = generate_random_events(total_events);
696         if (ret)
697                 return -1;
698
699         ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
700         if (ret)
701                 return -2;
702         rte_event_dev_stop(evdev);
703         ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
704         if (ret)
705                 return -3;
706         RTE_TEST_ASSERT_EQUAL(total_events, count,
707                               "count mismatch total_events=%d count=%d",
708                               total_events, count);
709
710         return 0;
711 }
712
713 static int
714 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
715                                    struct rte_event *ev)
716 {
717         RTE_SET_USED(index);
718         RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
719                               "queue mismatch enq=%d deq =%d",
720                               port, ev->queue_id);
721
722         return 0;
723 }
724
725 /*
726  * Link queue x to port x and check the correctness of the link by verifying
727  * that queue_id == x when dequeuing on that specific port x
728  */
729 static int
730 test_queue_to_port_single_link(void)
731 {
732         int i, nr_links, ret;
733         uint32_t queue_count;
734         uint32_t port_count;
735
736         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
737                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
738                                 "Port count get failed");
739
740         /* Unlink all connections that were created in eventdev_setup */
741         for (i = 0; i < (int)port_count; i++) {
742                 ret = rte_event_port_unlink(evdev, i, NULL, 0);
743                 RTE_TEST_ASSERT(ret >= 0,
744                                 "Failed to unlink all queues port=%d", i);
745         }
746
747         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
748                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
749                                 "Queue count get failed");
750
751         nr_links = RTE_MIN(port_count, queue_count);
752         const unsigned int total_events = MAX_EVENTS / nr_links;
753
754         /* Link queue x to port x and inject events to queue x through port x */
755         for (i = 0; i < nr_links; i++) {
756                 uint8_t queue = (uint8_t)i;
757
758                 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
759                 RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
760
761                 ret = inject_events(0x100 /*flow_id */,
762                                     RTE_EVENT_TYPE_CPU /* event_type */,
763                                     rte_rand() % 256 /* sub_event_type */,
764                                     rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
765                                     queue /* queue */, i /* port */,
766                                     total_events /* events */);
767                 if (ret)
768                         return -1;
769         }
770
771         /* Verify that the events were generated from the correct queue */
772         for (i = 0; i < nr_links; i++) {
773                 ret = consume_events(i /* port */, total_events,
774                                      validate_queue_to_port_single_link);
775                 if (ret)
776                         return -1;
777         }
778
779         return 0;
780 }
781
782 static int
783 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
784                                   struct rte_event *ev)
785 {
786         RTE_SET_USED(index);
787         RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
788                               "queue mismatch enq=%d deq =%d",
789                               port, ev->queue_id);
790
791         return 0;
792 }
793
794 /*
795  * Link all even-numbered queues to port 0 and all odd-numbered queues to
796  * port 1, and verify the link connections on dequeue
797  */
798 static int
799 test_queue_to_port_multi_link(void)
800 {
801         int ret, port0_events = 0, port1_events = 0;
802         uint32_t nr_queues = 0;
803         uint32_t nr_ports = 0;
804         uint8_t queue, port;
805
806         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
807                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),
808                                 "Queue count get failed");
809         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
810                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),
811                                 "Queue count get failed");
812         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
813                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
814                                 "Port count get failed");
815
816         if (nr_ports < 2) {
817                 otx2_err("Not enough ports to test ports=%d", nr_ports);
818                 return 0;
819         }
820
821         /* Unlink all connections that were created in eventdev_setup */
822         for (port = 0; port < nr_ports; port++) {
823                 ret = rte_event_port_unlink(evdev, port, NULL, 0);
824                 RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
825                                 port);
826         }
827
828         const unsigned int total_events = MAX_EVENTS / nr_queues;
829
830         /* Link even-numbered queues to port 0 and odd-numbered queues to port 1 */
831         for (queue = 0; queue < nr_queues; queue++) {
832                 port = queue & 0x1;
833                 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
834                 RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
835                                 queue, port);
836
837                 ret = inject_events(0x100 /*flow_id */,
838                                     RTE_EVENT_TYPE_CPU /* event_type */,
839                                     rte_rand() % 256 /* sub_event_type */,
840                                     rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
841                                     queue /* queue */, port /* port */,
842                                     total_events /* events */);
843                 if (ret)
844                         return -1;
845
846                 if (port == 0)
847                         port0_events += total_events;
848                 else
849                         port1_events += total_events;
850         }
851
852         ret = consume_events(0 /* port */, port0_events,
853                              validate_queue_to_port_multi_link);
854         if (ret)
855                 return -1;
856         ret = consume_events(1 /* port */, port1_events,
857                              validate_queue_to_port_multi_link);
858         if (ret)
859                 return -1;
860
861         return 0;
862 }
863
864 static int
865 worker_flow_based_pipeline(void *arg)
866 {
867         struct test_core_param *param = arg;
868         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
869         rte_atomic32_t *total_events = param->total_events;
870         uint8_t new_sched_type = param->sched_type;
871         uint8_t port = param->port;
872         uint16_t valid_event;
873         struct rte_event ev;
874
875         while (rte_atomic32_read(total_events) > 0) {
876                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
877                                                       dequeue_tmo_ticks);
878                 if (!valid_event)
879                         continue;
880
881                 /* Events from stage 0 */
882                 if (ev.sub_event_type == 0) {
883                         /* Move to atomic flow to maintain the ordering */
884                         ev.flow_id = 0x2;
885                         ev.event_type = RTE_EVENT_TYPE_CPU;
886                         ev.sub_event_type = 1; /* stage 1 */
887                         ev.sched_type = new_sched_type;
888                         ev.op = RTE_EVENT_OP_FORWARD;
889                         rte_event_enqueue_burst(evdev, port, &ev, 1);
890                 } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
891                         if (seqn_list_update(ev.mbuf->seqn) == 0) {
892                                 rte_pktmbuf_free(ev.mbuf);
893                                 rte_atomic32_sub(total_events, 1);
894                         } else {
895                                 otx2_err("Failed to update seqn_list");
896                                 return -1;
897                         }
898                 } else {
899                         otx2_err("Invalid ev.sub_event_type = %d",
900                                  ev.sub_event_type);
901                         return -1;
902                 }
903         }
904         return 0;
905 }
906
907 static int
908 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
909                                     uint8_t out_sched_type)
910 {
911         const unsigned int total_events = MAX_EVENTS;
912         uint32_t nr_ports;
913         int ret;
914
915         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
916                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
917                                 "Port count get failed");
918         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
919
920         if (!nr_ports) {
921                 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
922                          rte_lcore_count() - 1);
923                 return 0;
924         }
925
926         /* Inject events with m->seqn from 0 to total_events - 1 */
927         ret = inject_events(0x1 /*flow_id */,
928                             RTE_EVENT_TYPE_CPU /* event_type */,
929                             0 /* sub_event_type (stage 0) */,
930                             in_sched_type,
931                             0 /* queue */,
932                             0 /* port */,
933                             total_events /* events */);
934         if (ret)
935                 return -1;
936
937         rte_mb();
938         ret = launch_workers_and_wait(worker_flow_based_pipeline,
939                                       worker_flow_based_pipeline, total_events,
940                                       nr_ports, out_sched_type);
941         if (ret)
942                 return -1;
943
944         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
945             out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
946                 /* Check whether the event order is maintained */
947                 return seqn_list_check(total_events);
948         }
949
950         return 0;
951 }
952
953 /* Multi port ordered to atomic transaction */
954 static int
955 test_multi_port_flow_ordered_to_atomic(void)
956 {
957         /* Ingress event order test */
958         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
959                                                    RTE_SCHED_TYPE_ATOMIC);
960 }
961
962 static int
963 test_multi_port_flow_ordered_to_ordered(void)
964 {
965         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
966                                                    RTE_SCHED_TYPE_ORDERED);
967 }
968
969 static int
970 test_multi_port_flow_ordered_to_parallel(void)
971 {
972         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
973                                                    RTE_SCHED_TYPE_PARALLEL);
974 }
975
976 static int
977 test_multi_port_flow_atomic_to_atomic(void)
978 {
979         /* Ingress event order test */
980         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
981                                                    RTE_SCHED_TYPE_ATOMIC);
982 }
983
984 static int
985 test_multi_port_flow_atomic_to_ordered(void)
986 {
987         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
988                                                    RTE_SCHED_TYPE_ORDERED);
989 }
990
991 static int
992 test_multi_port_flow_atomic_to_parallel(void)
993 {
994         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
995                                                    RTE_SCHED_TYPE_PARALLEL);
996 }
997
998 static int
999 test_multi_port_flow_parallel_to_atomic(void)
1000 {
1001         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1002                                                    RTE_SCHED_TYPE_ATOMIC);
1003 }
1004
1005 static int
1006 test_multi_port_flow_parallel_to_ordered(void)
1007 {
1008         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1009                                                    RTE_SCHED_TYPE_ORDERED);
1010 }
1011
1012 static int
1013 test_multi_port_flow_parallel_to_parallel(void)
1014 {
1015         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1016                                                    RTE_SCHED_TYPE_PARALLEL);
1017 }
1018
1019 static int
1020 worker_group_based_pipeline(void *arg)
1021 {
1022         struct test_core_param *param = arg;
1023         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
1024         rte_atomic32_t *total_events = param->total_events;
1025         uint8_t new_sched_type = param->sched_type;
1026         uint8_t port = param->port;
1027         uint16_t valid_event;
1028         struct rte_event ev;
1029
1030         while (rte_atomic32_read(total_events) > 0) {
1031                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
1032                                                       dequeue_tmo_ticks);
1033                 if (!valid_event)
1034                         continue;
1035
1036                 /* Events from stage 0(group 0) */
1037                 if (ev.queue_id == 0) {
1038                         /* Move to atomic flow to maintain the ordering */
1039                         ev.flow_id = 0x2;
1040                         ev.event_type = RTE_EVENT_TYPE_CPU;
1041                         ev.sched_type = new_sched_type;
1042                         ev.queue_id = 1; /* Stage 1*/
1043                         ev.op = RTE_EVENT_OP_FORWARD;
1044                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1045                 } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
1046                         if (seqn_list_update(ev.mbuf->seqn) == 0) {
1047                                 rte_pktmbuf_free(ev.mbuf);
1048                                 rte_atomic32_sub(total_events, 1);
1049                         } else {
1050                                 otx2_err("Failed to update seqn_list");
1051                                 return -1;
1052                         }
1053                 } else {
1054                         otx2_err("Invalid ev.queue_id = %d", ev.queue_id);
1055                         return -1;
1056                 }
1057         }
1058
1059         return 0;
1060 }
1061
1062 static int
1063 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
1064                                      uint8_t out_sched_type)
1065 {
1066         const unsigned int total_events = MAX_EVENTS;
1067         uint32_t queue_count;
1068         uint32_t nr_ports;
1069         int ret;
1070
1071         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1072                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1073                                 "Port count get failed");
1074
1075         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1076
1077         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1078                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1079                                 "Queue count get failed");
1080         if (queue_count < 2 ||  !nr_ports) {
1081                 otx2_err("Not enough queues=%d ports=%d or workers=%d",
1082                          queue_count, nr_ports,
1083                          rte_lcore_count() - 1);
1084                 return 0;
1085         }
1086
1087         /* Inject events with m->seqn from 0 to total_events - 1 */
1088         ret = inject_events(0x1 /*flow_id */,
1089                             RTE_EVENT_TYPE_CPU /* event_type */,
1090                             0 /* sub_event_type (stage 0) */,
1091                             in_sched_type,
1092                             0 /* queue */,
1093                             0 /* port */,
1094                             total_events /* events */);
1095         if (ret)
1096                 return -1;
1097
1098         ret = launch_workers_and_wait(worker_group_based_pipeline,
1099                                       worker_group_based_pipeline, total_events,
1100                                       nr_ports, out_sched_type);
1101         if (ret)
1102                 return -1;
1103
1104         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1105             out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
1106                 /* Check whether the event order is maintained */
1107                 return seqn_list_check(total_events);
1108         }
1109
1110         return 0;
1111 }
1112
1113 static int
1114 test_multi_port_queue_ordered_to_atomic(void)
1115 {
1116         /* Ingress event order test */
1117         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1118                                                     RTE_SCHED_TYPE_ATOMIC);
1119 }
1120
1121 static int
1122 test_multi_port_queue_ordered_to_ordered(void)
1123 {
1124         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1125                                                     RTE_SCHED_TYPE_ORDERED);
1126 }
1127
1128 static int
1129 test_multi_port_queue_ordered_to_parallel(void)
1130 {
1131         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1132                                                     RTE_SCHED_TYPE_PARALLEL);
1133 }
1134
1135 static int
1136 test_multi_port_queue_atomic_to_atomic(void)
1137 {
1138         /* Ingress event order test */
1139         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1140                                                     RTE_SCHED_TYPE_ATOMIC);
1141 }
1142
1143 static int
1144 test_multi_port_queue_atomic_to_ordered(void)
1145 {
1146         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1147                                                     RTE_SCHED_TYPE_ORDERED);
1148 }
1149
1150 static int
1151 test_multi_port_queue_atomic_to_parallel(void)
1152 {
1153         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1154                                                     RTE_SCHED_TYPE_PARALLEL);
1155 }
1156
1157 static int
1158 test_multi_port_queue_parallel_to_atomic(void)
1159 {
1160         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1161                                                     RTE_SCHED_TYPE_ATOMIC);
1162 }
1163
1164 static int
1165 test_multi_port_queue_parallel_to_ordered(void)
1166 {
1167         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1168                                                     RTE_SCHED_TYPE_ORDERED);
1169 }
1170
1171 static int
1172 test_multi_port_queue_parallel_to_parallel(void)
1173 {
1174         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1175                                                     RTE_SCHED_TYPE_PARALLEL);
1176 }
1177
1178 static int
1179 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1180 {
1181         struct test_core_param *param = arg;
1182         rte_atomic32_t *total_events = param->total_events;
1183         uint8_t port = param->port;
1184         uint16_t valid_event;
1185         struct rte_event ev;
1186
1187         while (rte_atomic32_read(total_events) > 0) {
1188                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1189                 if (!valid_event)
1190                         continue;
1191
1192                 if (ev.sub_event_type == 255) { /* last stage */
1193                         rte_pktmbuf_free(ev.mbuf);
1194                         rte_atomic32_sub(total_events, 1);
1195                 } else {
1196                         ev.event_type = RTE_EVENT_TYPE_CPU;
1197                         ev.sub_event_type++;
1198                         ev.sched_type =
1199                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1200                         ev.op = RTE_EVENT_OP_FORWARD;
1201                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1202                 }
1203         }
1204
1205         return 0;
1206 }
1207
1208 static int
1209 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1210 {
1211         uint32_t nr_ports;
1212         int ret;
1213
1214         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1215                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1216                                 "Port count get failed");
1217         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1218
1219         if (!nr_ports) {
1220                 otx2_err("Not enough ports=%d or workers=%d",
1221                          nr_ports, rte_lcore_count() - 1);
1222                 return 0;
1223         }
1224
1225         /* Inject events with m->seqn from 0 to MAX_EVENTS - 1 */
1226         ret = inject_events(0x1 /*flow_id */,
1227                             RTE_EVENT_TYPE_CPU /* event_type */,
1228                             0 /* sub_event_type (stage 0) */,
1229                             rte_rand() %
1230                                 (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1231                             0 /* queue */,
1232                             0 /* port */,
1233                             MAX_EVENTS /* events */);
1234         if (ret)
1235                 return -1;
1236
1237         return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1238                                        0xff /* invalid */);
1239 }
1240
1241 /* Flow based pipeline with maximum stages with random sched type */
1242 static int
1243 test_multi_port_flow_max_stages_random_sched_type(void)
1244 {
1245         return launch_multi_port_max_stages_random_sched_type(
1246                 worker_flow_based_pipeline_max_stages_rand_sched_type);
1247 }
1248
1249 static int
1250 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1251 {
1252         struct test_core_param *param = arg;
1253         uint8_t port = param->port;
1254         uint32_t queue_count;
1255         uint16_t valid_event;
1256         struct rte_event ev;
1257
1258         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1259                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1260                                 "Queue count get failed");
1261         uint8_t nr_queues = queue_count;
1262         rte_atomic32_t *total_events = param->total_events;
1263
1264         while (rte_atomic32_read(total_events) > 0) {
1265                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1266                 if (!valid_event)
1267                         continue;
1268
1269                 if (ev.queue_id == nr_queues - 1) { /* last stage */
1270                         rte_pktmbuf_free(ev.mbuf);
1271                         rte_atomic32_sub(total_events, 1);
1272                 } else {
1273                         ev.event_type = RTE_EVENT_TYPE_CPU;
1274                         ev.queue_id++;
1275                         ev.sched_type =
1276                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1277                         ev.op = RTE_EVENT_OP_FORWARD;
1278                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1279                 }
1280         }
1281
1282         return 0;
1283 }
1284
1285 /* Queue based pipeline with maximum stages with random sched type */
1286 static int
1287 test_multi_port_queue_max_stages_random_sched_type(void)
1288 {
1289         return launch_multi_port_max_stages_random_sched_type(
1290                 worker_queue_based_pipeline_max_stages_rand_sched_type);
1291 }
1292
1293 static int
1294 worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
1295 {
1296         struct test_core_param *param = arg;
1297         uint8_t port = param->port;
1298         uint32_t queue_count;
1299         uint16_t valid_event;
1300         struct rte_event ev;
1301
1302         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1303                                 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1304                                 "Queue count get failed");
1305         uint8_t nr_queues = queue_count;
1306         rte_atomic32_t *total_events = param->total_events;
1307
1308         while (rte_atomic32_read(total_events) > 0) {
1309                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1310                 if (!valid_event)
1311                         continue;
1312
1313                 if (ev.queue_id == nr_queues - 1) { /* Last stage */
1314                         rte_pktmbuf_free(ev.mbuf);
1315                         rte_atomic32_sub(total_events, 1);
1316                 } else {
1317                         ev.event_type = RTE_EVENT_TYPE_CPU;
1318                         ev.queue_id++;
1319                         ev.sub_event_type = rte_rand() % 256;
1320                         ev.sched_type =
1321                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1322                         ev.op = RTE_EVENT_OP_FORWARD;
1323                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1324                 }
1325         }
1326
1327         return 0;
1328 }
1329
1330 /* Queue and flow based pipeline with maximum stages with random sched type */
1331 static int
1332 test_multi_port_mixed_max_stages_random_sched_type(void)
1333 {
1334         return launch_multi_port_max_stages_random_sched_type(
1335                 worker_mixed_pipeline_max_stages_rand_sched_type);
1336 }
1337
1338 static int
1339 worker_ordered_flow_producer(void *arg)
1340 {
1341         struct test_core_param *param = arg;
1342         uint8_t port = param->port;
1343         struct rte_mbuf *m;
1344         int counter = 0;
1345
1346         while (counter < NUM_PACKETS) {
1347                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
1348                 if (m == NULL)
1349                         continue;
1350
1351                 m->seqn = counter++;
1352
1353                 struct rte_event ev = {.event = 0, .u64 = 0};
1354
1355                 ev.flow_id = 0x1; /* Generate a fat flow */
1356                 ev.sub_event_type = 0;
1357                 /* Inject the new event */
1358                 ev.op = RTE_EVENT_OP_NEW;
1359                 ev.event_type = RTE_EVENT_TYPE_CPU;
1360                 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
1361                 ev.queue_id = 0;
1362                 ev.mbuf = m;
1363                 rte_event_enqueue_burst(evdev, port, &ev, 1);
1364         }
1365
1366         return 0;
1367 }
1368
1369 static inline int
1370 test_producer_consumer_ingress_order_test(int (*fn)(void *))
1371 {
1372         uint32_t nr_ports;
1373
1374         RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1375                                 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1376                                 "Port count get failed");
1377         nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1378
1379         if (rte_lcore_count() < 3 || nr_ports < 2) {
1380                 otx2_err("### Not enough cores or ports for test.");
1381                 return 0;
1382         }
1383
1384         launch_workers_and_wait(worker_ordered_flow_producer, fn,
1385                                 NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
1386         /* Check whether the event order is maintained */
1387         return seqn_list_check(NUM_PACKETS);
1388 }
1389
1390 /* Flow based producer consumer ingress order test */
1391 static int
1392 test_flow_producer_consumer_ingress_order_test(void)
1393 {
1394         return test_producer_consumer_ingress_order_test(
1395                                 worker_flow_based_pipeline);
1396 }
1397
1398 /* Queue based producer consumer ingress order test */
1399 static int
1400 test_queue_producer_consumer_ingress_order_test(void)
1401 {
1402         return test_producer_consumer_ingress_order_test(
1403                                 worker_group_based_pipeline);
1404 }
1405
1406 static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
1407                               int (*test)(void), const char *name)
1408 {
1409         if (setup() < 0) {
1410                 printf("Error setting up test %s\n", name);
1411                 unsupported++;
1412         } else {
1413                 if (test() < 0) {
1414                         failed++;
1415                         printf("+ TestCase [%2d] : %s failed\n", total, name);
1416                 } else {
1417                         passed++;
1418                         printf("+ TestCase [%2d] : %s succeeded\n", total,
1419                                name);
1420                 }
1421         }
1422
1423         total++;
1424         tdown();
1425 }
1426
1427 int
1428 otx2_sso_selftest(void)
1429 {
1430         testsuite_setup();
1431
1432         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1433                            test_simple_enqdeq_ordered);
1434         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1435                            test_simple_enqdeq_atomic);
1436         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1437                            test_simple_enqdeq_parallel);
1438         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1439                            test_multi_queue_enq_single_port_deq);
1440         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1441                            test_dev_stop_flush);
1442         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1443                            test_multi_queue_enq_multi_port_deq);
1444         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1445                            test_queue_to_port_single_link);
1446         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1447                            test_queue_to_port_multi_link);
1448         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1449                            test_multi_port_flow_ordered_to_atomic);
1450         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1451                            test_multi_port_flow_ordered_to_ordered);
1452         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1453                            test_multi_port_flow_ordered_to_parallel);
1454         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1455                            test_multi_port_flow_atomic_to_atomic);
1456         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1457                            test_multi_port_flow_atomic_to_ordered);
1458         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1459                            test_multi_port_flow_atomic_to_parallel);
1460         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1461                            test_multi_port_flow_parallel_to_atomic);
1462         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1463                            test_multi_port_flow_parallel_to_ordered);
1464         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1465                            test_multi_port_flow_parallel_to_parallel);
1466         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1467                            test_multi_port_queue_ordered_to_atomic);
1468         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1469                            test_multi_port_queue_ordered_to_ordered);
1470         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1471                            test_multi_port_queue_ordered_to_parallel);
1472         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1473                            test_multi_port_queue_atomic_to_atomic);
1474         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1475                            test_multi_port_queue_atomic_to_ordered);
1476         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1477                            test_multi_port_queue_atomic_to_parallel);
1478         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1479                            test_multi_port_queue_parallel_to_atomic);
1480         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1481                            test_multi_port_queue_parallel_to_ordered);
1482         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1483                            test_multi_port_queue_parallel_to_parallel);
1484         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1485                            test_multi_port_flow_max_stages_random_sched_type);
1486         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1487                            test_multi_port_queue_max_stages_random_sched_type);
1488         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1489                            test_multi_port_mixed_max_stages_random_sched_type);
1490         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1491                            test_flow_producer_consumer_ingress_order_test);
1492         OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1493                            test_queue_producer_consumer_ingress_order_test);
1494         OCTEONTX2_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
1495                            test_multi_queue_priority);
1496         OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1497                            test_multi_port_flow_ordered_to_atomic);
1498         OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1499                            test_multi_port_queue_ordered_to_atomic);
1500         printf("Total tests   : %d\n", total);
1501         printf("Passed        : %d\n", passed);
1502         printf("Failed        : %d\n", failed);
1503         printf("Not supported : %d\n", unsupported);
1504
1505         testsuite_teardown();
1506
1507         if (failed)
1508                 return -1;
1509
1510         return 0;
1511 }