test/eventdev: add octeontx queue based max stage
[dpdk.git] / test / test / test_eventdev_octeontx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Cavium networks. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *       * Redistributions of source code must retain the above copyright
11  *         notice, this list of conditions and the following disclaimer.
12  *       * Redistributions in binary form must reproduce the above copyright
13  *         notice, this list of conditions and the following disclaimer in
14  *         the documentation and/or other materials provided with the
15  *         distribution.
16  *       * Neither the name of Cavium networks nor the names of its
17  *         contributors may be used to endorse or promote products derived
18  *         from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <rte_atomic.h>
34 #include <rte_common.h>
35 #include <rte_cycles.h>
36 #include <rte_debug.h>
37 #include <rte_eal.h>
38 #include <rte_ethdev.h>
39 #include <rte_eventdev.h>
40 #include <rte_hexdump.h>
41 #include <rte_mbuf.h>
42 #include <rte_malloc.h>
43 #include <rte_memcpy.h>
44 #include <rte_launch.h>
45 #include <rte_lcore.h>
46 #include <rte_per_lcore.h>
47 #include <rte_random.h>
48
49 #include "test.h"
50
51 #define NUM_PACKETS (1 << 18)
52 #define MAX_EVENTS  (16 * 1024)
53
54 static int evdev;
55 static struct rte_mempool *eventdev_test_mempool;
56
57 struct event_attr {
58         uint32_t flow_id;
59         uint8_t event_type;
60         uint8_t sub_event_type;
61         uint8_t sched_type;
62         uint8_t queue;
63         uint8_t port;
64 };
65
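/*
 * seqn_list[] records the mbuf sequence numbers seen at the last pipeline
 * stage so that ingress ordering can be verified with seqn_list_check().
 */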
66 static uint32_t seqn_list_index;
67 static int seqn_list[NUM_PACKETS];
68
69 static inline void
70 seqn_list_init(void)
71 {
72         RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
73         memset(seqn_list, 0, sizeof(seqn_list));
74         seqn_list_index = 0;
75 }
76
77 static inline int
78 seqn_list_update(int val)
79 {
80         if (seqn_list_index >= NUM_PACKETS)
81                 return TEST_FAILED;
82
83         seqn_list[seqn_list_index++] = val;
84         rte_smp_wmb();
85         return TEST_SUCCESS;
86 }
87
88 static inline int
89 seqn_list_check(int limit)
90 {
91         int i;
92
93         for (i = 0; i < limit; i++) {
94                 if (seqn_list[i] != i) {
95                         printf("Seqn mismatch %d %d\n", seqn_list[i], i);
96                         return TEST_FAILED;
97                 }
98         }
99         return TEST_SUCCESS;
100 }
101
102 struct test_core_param {
103         rte_atomic32_t *total_events;
104         uint64_t dequeue_tmo_ticks;
105         uint8_t port;
106         uint8_t sched_type;
107 };
108
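/* Probe the octeontx event device; create it as a vdev if it is not present */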
109 static int
110 testsuite_setup(void)
111 {
112         const char *eventdev_name = "event_octeontx";
113
114         evdev = rte_event_dev_get_dev_id(eventdev_name);
115         if (evdev < 0) {
116                 printf("%d: Eventdev %s not found - creating.\n",
117                                 __LINE__, eventdev_name);
118                 if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
119                         printf("Error creating eventdev %s\n", eventdev_name);
120                         return TEST_FAILED;
121                 }
122                 evdev = rte_event_dev_get_dev_id(eventdev_name);
123                 if (evdev < 0) {
124                         printf("Error finding newly created eventdev\n");
125                         return TEST_FAILED;
126                 }
127         }
128
129         return TEST_SUCCESS;
130 }
131
132 static void
133 testsuite_teardown(void)
134 {
135         rte_event_dev_close(evdev);
136 }
137
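/* Fill the device config with the maximum capabilities reported by the driver */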
138 static inline void
139 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
140                         struct rte_event_dev_info *info)
141 {
142         memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
143         dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
144         dev_conf->nb_event_ports = info->max_event_ports;
145         dev_conf->nb_event_queues = info->max_event_queues;
146         dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
147         dev_conf->nb_event_port_dequeue_depth =
148                         info->max_event_port_dequeue_depth;
149         dev_conf->nb_event_port_enqueue_depth =
150                         info->max_event_port_enqueue_depth;
153         dev_conf->nb_events_limit =
154                         info->max_num_events;
155 }
156
157 enum {
158         TEST_EVENTDEV_SETUP_DEFAULT,
159         TEST_EVENTDEV_SETUP_PRIORITY,
160         TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
161 };
162
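/*
 * Common test setup: create the test mempool, configure the device, set up
 * all event queues (optionally with per-queue priorities) and ports, link
 * every queue to every port and start the device.
 */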
163 static inline int
164 _eventdev_setup(int mode)
165 {
166         int i, ret;
167         struct rte_event_dev_config dev_conf;
168         struct rte_event_dev_info info;
169         const char *pool_name = "evdev_octeontx_test_pool";
170
171         /* Create and destroy the mempool for each test case to keep it standalone */
172         eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
173                                         MAX_EVENTS,
174                                         0 /*MBUF_CACHE_SIZE*/,
175                                         0,
176                                         512, /* Use very small mbufs */
177                                         rte_socket_id());
178         if (!eventdev_test_mempool) {
179                 printf("ERROR creating mempool\n");
180                 return TEST_FAILED;
181         }
182
183         ret = rte_event_dev_info_get(evdev, &info);
184         TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
185         TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
186                         "max_num_events=%d < max_events=%d",
187                         info.max_num_events, MAX_EVENTS);
188
189         devconf_set_default_sane_values(&dev_conf, &info);
190         if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
191                 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
192
193         ret = rte_event_dev_configure(evdev, &dev_conf);
194         TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
195
196         if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
197                 /* Configure event queues (0 to n-1) with priorities from
198                  * RTE_EVENT_DEV_PRIORITY_HIGHEST to
199                  * RTE_EVENT_DEV_PRIORITY_LOWEST
200                  */
201                 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
202                                 rte_event_queue_count(evdev);
203                 for (i = 0; i < rte_event_queue_count(evdev); i++) {
204                         struct rte_event_queue_conf queue_conf;
205
206                         ret = rte_event_queue_default_conf_get(evdev, i,
207                                                 &queue_conf);
208                         TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
209                         queue_conf.priority = i * step;
210                         ret = rte_event_queue_setup(evdev, i, &queue_conf);
211                         TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
212                 }
213
214         } else {
215                 /* Configure event queues with default priority */
216                 for (i = 0; i < rte_event_queue_count(evdev); i++) {
217                         ret = rte_event_queue_setup(evdev, i, NULL);
218                         TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
219                 }
220         }
221         /* Configure event ports */
222         for (i = 0; i < rte_event_port_count(evdev); i++) {
223                 ret = rte_event_port_setup(evdev, i, NULL);
224                 TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
225                 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
226                 TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
227         }
228
229         ret = rte_event_dev_start(evdev);
230         TEST_ASSERT_SUCCESS(ret, "Failed to start device");
231
232         return TEST_SUCCESS;
233 }
234
235 static inline int
236 eventdev_setup(void)
237 {
238         return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
239 }
240
241 static inline int
242 eventdev_setup_priority(void)
243 {
244         return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
245 }
246
247 static inline void
248 eventdev_teardown(void)
249 {
250         rte_event_dev_stop(evdev);
251         rte_mempool_free(eventdev_test_mempool);
252 }
253
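/*
 * Mirror the event attributes into the mbuf data area so that validate_event()
 * can cross-check them against the dequeued event.
 */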
254 static inline void
255 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
256                         uint32_t flow_id, uint8_t event_type,
257                         uint8_t sub_event_type, uint8_t sched_type,
258                         uint8_t queue, uint8_t port)
259 {
260         struct event_attr *attr;
261
262         /* Store the event attributes in mbuf for future reference */
263         attr = rte_pktmbuf_mtod(m, struct event_attr *);
264         attr->flow_id = flow_id;
265         attr->event_type = event_type;
266         attr->sub_event_type = sub_event_type;
267         attr->sched_type = sched_type;
268         attr->queue = queue;
269         attr->port = port;
270
271         ev->flow_id = flow_id;
272         ev->sub_event_type = sub_event_type;
273         ev->event_type = event_type;
274         /* Inject the new event */
275         ev->op = RTE_EVENT_OP_NEW;
276         ev->sched_type = sched_type;
277         ev->queue_id = queue;
278         ev->mbuf = m;
279 }
280
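/* Allocate 'events' mbufs, tag each with a sequence number and enqueue it */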
281 static inline int
282 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
283                 uint8_t sched_type, uint8_t queue, uint8_t port,
284                 unsigned int events)
285 {
286         struct rte_mbuf *m;
287         unsigned int i;
288
289         for (i = 0; i < events; i++) {
290                 struct rte_event ev = {.event = 0, .u64 = 0};
291
292                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
293                 TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
294
295                 m->seqn = i;
296                 update_event_and_validation_attr(m, &ev, flow_id, event_type,
297                         sub_event_type, sched_type, queue, port);
298                 rte_event_enqueue_burst(evdev, port, &ev, 1);
299         }
300         return 0;
301 }
302
303 static inline int
304 check_excess_events(uint8_t port)
305 {
306         int i;
307         uint16_t valid_event;
308         struct rte_event ev;
309
310         /* Check for excess events: poll a few times and expect none */
311         for (i = 0; i < 32; i++) {
312                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
313
314                 TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
315                                         ev.mbuf->seqn);
316         }
317         return 0;
318 }
319
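/*
 * Enqueue total_events events through port 0 with random flow id, event type,
 * sub event type, sched type and destination queue.
 */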
320 static inline int
321 generate_random_events(const unsigned int total_events)
322 {
323         struct rte_event_dev_info info;
324         unsigned int i;
325         int ret;
326
327         ret = rte_event_dev_info_get(evdev, &info);
328         TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
329         for (i = 0; i < total_events; i++) {
330                 ret = inject_events(
331                         rte_rand() % info.max_event_queue_flows /*flow_id */,
332                         rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
333                         rte_rand() % 256 /* sub_event_type */,
334                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
335                         rte_rand() % rte_event_queue_count(evdev) /* queue */,
336                         0 /* port */,
337                         1 /* events */);
338                 if (ret)
339                         return TEST_FAILED;
340         }
341         return ret;
342 }
343
344
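/* Compare the dequeued event against the attributes stored in its mbuf at enqueue */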
345 static inline int
346 validate_event(struct rte_event *ev)
347 {
348         struct event_attr *attr;
349
350         attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
351         TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
352                         "flow_id mismatch enq=%d deq =%d",
353                         attr->flow_id, ev->flow_id);
354         TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
355                         "event_type mismatch enq=%d deq =%d",
356                         attr->event_type, ev->event_type);
357         TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
358                         "sub_event_type mismatch enq=%d deq =%d",
359                         attr->sub_event_type, ev->sub_event_type);
360         TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
361                         "sched_type mismatch enq=%d deq =%d",
362                         attr->sched_type, ev->sched_type);
363         TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
364                         "queue mismatch enq=%d deq =%d",
365                         attr->queue, ev->queue_id);
366         return 0;
367 }
368
369 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
370                                  struct rte_event *ev);
371
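/*
 * Dequeue total_events events from the given port, validating each one and
 * optionally running a test-specific callback. Declare a deadlock if
 * UINT16_MAX consecutive polls return nothing, and finally make sure no
 * excess events are left in the device.
 */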
372 static inline int
373 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
374 {
375         int ret;
376         uint16_t valid_event;
377         uint32_t events = 0, forward_progress_cnt = 0, index = 0;
378         struct rte_event ev;
379
380         while (1) {
381                 if (++forward_progress_cnt > UINT16_MAX) {
382                         printf("Detected deadlock\n");
383                         return TEST_FAILED;
384                 }
385
386                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
387                 if (!valid_event)
388                         continue;
389
390                 forward_progress_cnt = 0;
391                 ret = validate_event(&ev);
392                 if (ret)
393                         return TEST_FAILED;
394
395                 if (fn != NULL) {
396                         ret = fn(index, port, &ev);
397                         TEST_ASSERT_SUCCESS(ret,
398                                 "Failed to validate test specific event");
399                 }
400
401                 ++index;
402
403                 rte_pktmbuf_free(ev.mbuf);
404                 if (++events >= total_events)
405                         break;
406         }
407
408         return check_excess_events(port);
409 }
410
411 static int
412 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
413 {
414         RTE_SET_USED(port);
415         TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
416                                         ev->mbuf->seqn);
417         return 0;
418 }
419
420 static inline int
421 test_simple_enqdeq(uint8_t sched_type)
422 {
423         int ret;
424
425         ret = inject_events(0 /*flow_id */,
426                                 RTE_EVENT_TYPE_CPU /* event_type */,
427                                 0 /* sub_event_type */,
428                                 sched_type,
429                                 0 /* queue */,
430                                 0 /* port */,
431                                 MAX_EVENTS);
432         if (ret)
433                 return TEST_FAILED;
434
435         return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
436 }
437
438 static int
439 test_simple_enqdeq_ordered(void)
440 {
441         return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
442 }
443
444 static int
445 test_simple_enqdeq_atomic(void)
446 {
447         return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
448 }
449
450 static int
451 test_simple_enqdeq_parallel(void)
452 {
453         return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
454 }
455
456 /*
457  * Generate a prescribed number of events and spread them across available
458  * queues. On dequeue through a single event port (port 0), verify the
459  * enqueued event attributes.
460  */
461 static int
462 test_multi_queue_enq_single_port_deq(void)
463 {
464         int ret;
465
466         ret = generate_random_events(MAX_EVENTS);
467         if (ret)
468                 return TEST_FAILED;
469
470         return consume_events(0 /* port */, MAX_EVENTS, NULL);
471 }
472
473 /*
474  * Inject 0..MAX_EVENTS events over queues 0..rte_event_queue_count()-1 using a
475  * modulo operation
476  *
477  * For example, Inject 32 events over 0..7 queues
478  * enqueue events 0, 8, 16, 24 in queue 0
479  * enqueue events 1, 9, 17, 25 in queue 1
480  * ..
481  * ..
482  * enqueue events 7, 15, 23, 31 in queue 7
483  *
484  * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25..,7,15,23,31
485  * order, from queue 0 (highest priority) to queue 7 (lowest priority)
486  */
487 static int
488 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
489 {
490         uint32_t range = MAX_EVENTS / rte_event_queue_count(evdev);
491         uint32_t expected_val = (index % range) * rte_event_queue_count(evdev);
492
493         expected_val += ev->queue_id;
494         RTE_SET_USED(port);
495         TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
496         "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
497                         ev->mbuf->seqn, index, expected_val, range,
498                         rte_event_queue_count(evdev), MAX_EVENTS);
499         return 0;
500 }
501
502 static int
503 test_multi_queue_priority(void)
504 {
505         uint8_t queue;
506         struct rte_mbuf *m;
507         int i, max_evts_roundoff;
508
509         /* See validate_queue_priority() comments for the priority validation logic */
510         max_evts_roundoff  = MAX_EVENTS / rte_event_queue_count(evdev);
511         max_evts_roundoff *= rte_event_queue_count(evdev);
512
513         for (i = 0; i < max_evts_roundoff; i++) {
514                 struct rte_event ev = {.event = 0, .u64 = 0};
515
516                 m = rte_pktmbuf_alloc(eventdev_test_mempool);
517                 TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
518
519                 m->seqn = i;
520                 queue = i % rte_event_queue_count(evdev);
521                 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
522                         0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
523                 rte_event_enqueue_burst(evdev, 0, &ev, 1);
524         }
525
526         return consume_events(0, max_evts_roundoff, validate_queue_priority);
527 }
528
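/* Worker: dequeue, validate and free events until the shared event count drops to zero */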
529 static int
530 worker_multi_port_fn(void *arg)
531 {
532         struct test_core_param *param = arg;
533         struct rte_event ev;
534         uint16_t valid_event;
535         uint8_t port = param->port;
536         rte_atomic32_t *total_events = param->total_events;
537         int ret;
538
539         while (rte_atomic32_read(total_events) > 0) {
540                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
541                 if (!valid_event)
542                         continue;
543
544                 ret = validate_event(&ev);
545                 TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
546                 rte_pktmbuf_free(ev.mbuf);
547                 rte_atomic32_sub(total_events, 1);
548         }
549         return 0;
550 }
551
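/*
 * Wait for the last launched worker lcore to finish; dump the device state and
 * report a failure if it has not completed within roughly ten seconds.
 */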
552 static inline int
553 wait_workers_to_join(int lcore, const rte_atomic32_t *count)
554 {
555         uint64_t cycles, print_cycles;
556
557         print_cycles = cycles = rte_get_timer_cycles();
558         while (rte_eal_get_lcore_state(lcore) != FINISHED) {
559                 uint64_t new_cycles = rte_get_timer_cycles();
560
561                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
562                         printf("\r%s: events %d\n", __func__,
563                                 rte_atomic32_read(count));
564                         print_cycles = new_cycles;
565                 }
566                 if (new_cycles - cycles > rte_get_timer_hz() * 10) {
567                         printf("%s: No progress for 10 seconds, deadlock (%d)\n",
568                                 __func__,
569                                 rte_atomic32_read(count));
570                         rte_event_dev_dump(evdev, stdout);
571                         cycles = new_cycles;
572                         return TEST_FAILED;
573                 }
574         }
575         rte_eal_mp_wait_lcore();
576         return TEST_SUCCESS;
577 }
578
579
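/*
 * Launch master_worker on the first available worker lcore (port 0) and
 * slave_workers on the remaining lcores (ports 1..nb_workers-1), then wait
 * for all of them to finish.
 */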
580 static inline int
581 launch_workers_and_wait(int (*master_worker)(void *),
582                         int (*slave_workers)(void *), uint32_t total_events,
583                         uint8_t nb_workers, uint8_t sched_type)
584 {
585         uint8_t port = 0;
586         int w_lcore;
587         int ret;
588         struct test_core_param *param;
589         rte_atomic32_t atomic_total_events;
590         uint64_t dequeue_tmo_ticks;
591
592         if (!nb_workers)
593                 return 0;
594
595         rte_atomic32_set(&atomic_total_events, total_events);
596         seqn_list_init();
597
598         param = malloc(sizeof(struct test_core_param) * nb_workers);
599         if (!param)
600                 return TEST_FAILED;
601
602         ret = rte_event_dequeue_timeout_ticks(evdev,
603                 rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
604         if (ret)
605                 return TEST_FAILED;
606
607         param[0].total_events = &atomic_total_events;
608         param[0].sched_type = sched_type;
609         param[0].port = 0;
610         param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
611         rte_smp_wmb();
612
613         w_lcore = rte_get_next_lcore(
614                         /* start core */ -1,
615                         /* skip master */ 1,
616                         /* wrap */ 0);
617         rte_eal_remote_launch(master_worker, &param[0], w_lcore);
618
619         for (port = 1; port < nb_workers; port++) {
620                 param[port].total_events = &atomic_total_events;
621                 param[port].sched_type = sched_type;
622                 param[port].port = port;
623                 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
624                 rte_smp_wmb();
625                 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
626                 rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
627         }
628
629         ret = wait_workers_to_join(w_lcore, &atomic_total_events);
630         free(param);
631         return ret;
632 }
633
634 /*
635  * Generate a prescribed number of events and spread them across available
636  * queues. Dequeue the events through multiple ports and verify the enqueued
637  * event attributes
638  */
639 static int
640 test_multi_queue_enq_multi_port_deq(void)
641 {
642         const unsigned int total_events = MAX_EVENTS;
643         uint8_t nr_ports;
644         int ret;
645
646         ret = generate_random_events(total_events);
647         if (ret)
648                 return TEST_FAILED;
649
650         nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
651
652         if (!nr_ports) {
653                 printf("%s: Not enough ports=%d or workers=%d\n", __func__,
654                         rte_event_port_count(evdev), rte_lcore_count() - 1);
655                 return TEST_SUCCESS;
656         }
657
658         return launch_workers_and_wait(worker_multi_port_fn,
659                                         worker_multi_port_fn, total_events,
660                                         nr_ports, 0xff /* invalid */);
661 }
662
663 static int
664 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
665                         struct rte_event *ev)
666 {
667         RTE_SET_USED(index);
668         TEST_ASSERT_EQUAL(port, ev->queue_id,
669                                 "queue mismatch enq=%d deq =%d",
670                                 port, ev->queue_id);
671         return 0;
672 }
673
674 /*
675  * Link queue x to port x and check the correctness of the link by verifying
676  * queue_id == x on dequeue from the specific port x
677  */
678 static int
679 test_queue_to_port_single_link(void)
680 {
681         int i, nr_links, ret;
682
683         /* Unlink all connections created in eventdev_setup() */
684         for (i = 0; i < rte_event_port_count(evdev); i++) {
685                 ret = rte_event_port_unlink(evdev, i, NULL, 0);
686                 TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
687         }
688
689         nr_links = RTE_MIN(rte_event_port_count(evdev),
690                                 rte_event_queue_count(evdev));
691         const unsigned int total_events = MAX_EVENTS / nr_links;
692
693         /* Link queue x to port x and inject events to queue x through port x */
694         for (i = 0; i < nr_links; i++) {
695                 uint8_t queue = (uint8_t)i;
696
697                 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
698                 TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
699
700                 ret = inject_events(
701                         0x100 /*flow_id */,
702                         rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
703                         rte_rand() % 256 /* sub_event_type */,
704                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
705                         queue /* queue */,
706                         i /* port */,
707                         total_events /* events */);
708                 if (ret)
709                         return TEST_FAILED;
710         }
711
712         /* Verify that the events were received from the correct queue */
713         for (i = 0; i < nr_links; i++) {
714                 ret = consume_events(i /* port */, total_events,
715                                 validate_queue_to_port_single_link);
716                 if (ret)
717                         return TEST_FAILED;
718         }
719
720         return TEST_SUCCESS;
721 }
722
723 static int
724 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
725                         struct rte_event *ev)
726 {
727         RTE_SET_USED(index);
728         TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
729                                 "queue mismatch enq=%d deq =%d",
730                                 port, ev->queue_id);
731         return 0;
732 }
733
734 /*
735  * Link all even-numbered queues to port 0 and all odd-numbered queues to
736  * port 1, then verify the link mapping on dequeue
737  */
738 static int
739 test_queue_to_port_multi_link(void)
740 {
741         int ret, port0_events = 0, port1_events = 0;
742         uint8_t nr_queues, nr_ports, queue, port;
743
744         nr_queues = rte_event_queue_count(evdev);
745         nr_ports = rte_event_port_count(evdev);
746
747         if (nr_ports < 2) {
748                 printf("%s: Not enough ports to test ports=%d\n",
749                                 __func__, nr_ports);
750                 return TEST_SUCCESS;
751         }
752
753         /* Unlink all connections created in eventdev_setup() */
754         for (port = 0; port < nr_ports; port++) {
755                 ret = rte_event_port_unlink(evdev, port, NULL, 0);
756                 TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
757                                         port);
758         }
759
760         const unsigned int total_events = MAX_EVENTS / nr_queues;
761
762         /* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
763         for (queue = 0; queue < nr_queues; queue++) {
764                 port = queue & 0x1;
765                 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
766                 TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
767                                         queue, port);
768
769                 ret = inject_events(
770                         0x100 /*flow_id */,
771                         rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
772                         rte_rand() % 256 /* sub_event_type */,
773                         rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
774                         queue /* queue */,
775                         port /* port */,
776                         total_events /* events */);
777                 if (ret)
778                         return TEST_FAILED;
779
780                 if (port == 0)
781                         port0_events += total_events;
782                 else
783                         port1_events += total_events;
784         }
785
786         ret = consume_events(0 /* port */, port0_events,
787                                 validate_queue_to_port_multi_link);
788         if (ret)
789                 return TEST_FAILED;
790         ret = consume_events(1 /* port */, port1_events,
791                                 validate_queue_to_port_multi_link);
792         if (ret)
793                 return TEST_FAILED;
794
795         return TEST_SUCCESS;
796 }
797
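/*
 * Two-stage flow-based pipeline: the stage is encoded in sub_event_type.
 * Stage 0 events are moved to flow 0x2 with the requested sched type and
 * forwarded to stage 1; stage 1 events are recorded in seqn_list and released.
 */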
798 static int
799 worker_flow_based_pipeline(void *arg)
800 {
801         struct test_core_param *param = arg;
802         struct rte_event ev;
803         uint16_t valid_event;
804         uint8_t port = param->port;
805         uint8_t new_sched_type = param->sched_type;
806         rte_atomic32_t *total_events = param->total_events;
807         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
808
809         while (rte_atomic32_read(total_events) > 0) {
810                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
811                                         dequeue_tmo_ticks);
812                 if (!valid_event)
813                         continue;
814
815                 /* Events from stage 0 */
816                 if (ev.sub_event_type == 0) {
817                         /* Move to atomic flow to maintain the ordering */
818                         ev.flow_id = 0x2;
819                         ev.event_type = RTE_EVENT_TYPE_CPU;
820                         ev.sub_event_type = 1; /* stage 1 */
821                         ev.sched_type = new_sched_type;
822                         ev.op = RTE_EVENT_OP_FORWARD;
823                         rte_event_enqueue_burst(evdev, port, &ev, 1);
824                 } else if (ev.sub_event_type == 1) { /* Events from stage 1 */
825                         if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
826                                 rte_pktmbuf_free(ev.mbuf);
827                                 rte_atomic32_sub(total_events, 1);
828                         } else {
829                                 printf("Failed to update seqn_list\n");
830                                 return TEST_FAILED;
831                         }
832                 } else {
833                         printf("Invalid ev.sub_event_type = %d\n",
834                                         ev.sub_event_type);
835                         return TEST_FAILED;
836                 }
837         }
838         return 0;
839 }
840
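/*
 * Inject MAX_EVENTS events with in_sched_type, run the two-stage flow-based
 * pipeline on multiple ports with out_sched_type and, when the combination
 * guarantees ordering, verify the ingress order at the last stage.
 */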
841 static int
842 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
843                         uint8_t out_sched_type)
844 {
845         const unsigned int total_events = MAX_EVENTS;
846         uint8_t nr_ports;
847         int ret;
848
849         nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
850
851         if (!nr_ports) {
852                 printf("%s: Not enough ports=%d or workers=%d\n", __func__,
853                         rte_event_port_count(evdev), rte_lcore_count() - 1);
854                 return TEST_SUCCESS;
855         }
856
857         /* Inject events with m->seqn from 0 to total_events - 1 */
858         ret = inject_events(
859                 0x1 /*flow_id */,
860                 RTE_EVENT_TYPE_CPU /* event_type */,
861                 0 /* sub_event_type (stage 0) */,
862                 in_sched_type,
863                 0 /* queue */,
864                 0 /* port */,
865                 total_events /* events */);
866         if (ret)
867                 return TEST_FAILED;
868
869         ret = launch_workers_and_wait(worker_flow_based_pipeline,
870                                         worker_flow_based_pipeline,
871                                         total_events, nr_ports, out_sched_type);
872         if (ret)
873                 return TEST_FAILED;
874
875         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
876                         out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
877                 /* Check whether the event ordering was maintained */
878                 return seqn_list_check(total_events);
879         }
880         return TEST_SUCCESS;
881 }
882
883
884 /* Multi port ordered to atomic transaction */
885 static int
886 test_multi_port_flow_ordered_to_atomic(void)
887 {
888         /* Ingress event order test */
889         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
890                                 RTE_SCHED_TYPE_ATOMIC);
891 }
892
893 static int
894 test_multi_port_flow_ordered_to_ordered(void)
895 {
896         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
897                                 RTE_SCHED_TYPE_ORDERED);
898 }
899
900 static int
901 test_multi_port_flow_ordered_to_parallel(void)
902 {
903         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
904                                 RTE_SCHED_TYPE_PARALLEL);
905 }
906
907 static int
908 test_multi_port_flow_atomic_to_atomic(void)
909 {
910         /* Ingress event order test */
911         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
912                                 RTE_SCHED_TYPE_ATOMIC);
913 }
914
915 static int
916 test_multi_port_flow_atomic_to_ordered(void)
917 {
918         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
919                                 RTE_SCHED_TYPE_ORDERED);
920 }
921
922 static int
923 test_multi_port_flow_atomic_to_parallel(void)
924 {
925         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
926                                 RTE_SCHED_TYPE_PARALLEL);
927 }
928
929 static int
930 test_multi_port_flow_parallel_to_atomic(void)
931 {
932         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
933                                 RTE_SCHED_TYPE_ATOMIC);
934 }
935
936 static int
937 test_multi_port_flow_parallel_to_ordered(void)
938 {
939         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
940                                 RTE_SCHED_TYPE_ORDERED);
941 }
942
943 static int
944 test_multi_port_flow_parallel_to_parallel(void)
945 {
946         return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
947                                 RTE_SCHED_TYPE_PARALLEL);
948 }
949
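/*
 * Two-stage queue(group)-based pipeline: the stage is encoded in queue_id.
 * Events from queue 0 are forwarded to queue 1 with the requested sched type;
 * events from queue 1 are recorded in seqn_list and released.
 */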
950 static int
951 worker_group_based_pipeline(void *arg)
952 {
953         struct test_core_param *param = arg;
954         struct rte_event ev;
955         uint16_t valid_event;
956         uint8_t port = param->port;
957         uint8_t new_sched_type = param->sched_type;
958         rte_atomic32_t *total_events = param->total_events;
959         uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
960
961         while (rte_atomic32_read(total_events) > 0) {
962                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
963                                         dequeue_tmo_ticks);
964                 if (!valid_event)
965                         continue;
966
967                 /* Events from stage 0 (group 0) */
968                 if (ev.queue_id == 0) {
969                         /* Move to atomic flow to maintain the ordering */
970                         ev.flow_id = 0x2;
971                         ev.event_type = RTE_EVENT_TYPE_CPU;
972                         ev.sched_type = new_sched_type;
973                         ev.queue_id = 1; /* Stage 1*/
974                         ev.op = RTE_EVENT_OP_FORWARD;
975                         rte_event_enqueue_burst(evdev, port, &ev, 1);
976                 } else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
977                         if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
978                                 rte_pktmbuf_free(ev.mbuf);
979                                 rte_atomic32_sub(total_events, 1);
980                         } else {
981                                 printf("Failed to update seqn_list\n");
982                                 return TEST_FAILED;
983                         }
984                 } else {
985                         printf("Invalid ev.queue_id = %d\n", ev.queue_id);
986                         return TEST_FAILED;
987                 }
988         }
989
991         return 0;
992 }
993
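/*
 * Same as the flow-based sched type test above, but the pipeline stages are
 * mapped to event queues instead of sub event types (needs at least 2 queues).
 */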
994 static int
995 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
996                         uint8_t out_sched_type)
997 {
998         const unsigned int total_events = MAX_EVENTS;
999         uint8_t nr_ports;
1000         int ret;
1001
1002         nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
1003
1004         if (rte_event_queue_count(evdev) < 2 || !nr_ports) {
1005                 printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
1006                          __func__, rte_event_queue_count(evdev),
1007                         rte_event_port_count(evdev), rte_lcore_count() - 1);
1008                 return TEST_SUCCESS;
1009         }
1010
1011         /* Inject events with m->seqn from 0 to total_events - 1 */
1012         ret = inject_events(
1013                 0x1 /*flow_id */,
1014                 RTE_EVENT_TYPE_CPU /* event_type */,
1015                 0 /* sub_event_type (stage 0) */,
1016                 in_sched_type,
1017                 0 /* queue */,
1018                 0 /* port */,
1019                 total_events /* events */);
1020         if (ret)
1021                 return TEST_FAILED;
1022
1023         ret = launch_workers_and_wait(worker_group_based_pipeline,
1024                                         worker_group_based_pipeline,
1025                                         total_events, nr_ports, out_sched_type);
1026         if (ret)
1027                 return TEST_FAILED;
1028
1029         if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1030                         out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
1031                 /* Check whether the event ordering was maintained */
1032                 return seqn_list_check(total_events);
1033         }
1034         return TEST_SUCCESS;
1035 }
1036
1037 static int
1038 test_multi_port_queue_ordered_to_atomic(void)
1039 {
1040         /* Ingress event order test */
1041         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1042                                 RTE_SCHED_TYPE_ATOMIC);
1043 }
1044
1045 static int
1046 test_multi_port_queue_ordered_to_ordered(void)
1047 {
1048         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1049                                 RTE_SCHED_TYPE_ORDERED);
1050 }
1051
1052 static int
1053 test_multi_port_queue_ordered_to_parallel(void)
1054 {
1055         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1056                                 RTE_SCHED_TYPE_PARALLEL);
1057 }
1058
1059 static int
1060 test_multi_port_queue_atomic_to_atomic(void)
1061 {
1062         /* Ingress event order test */
1063         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1064                                 RTE_SCHED_TYPE_ATOMIC);
1065 }
1066
1067 static int
1068 test_multi_port_queue_atomic_to_ordered(void)
1069 {
1070         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1071                                 RTE_SCHED_TYPE_ORDERED);
1072 }
1073
1074 static int
1075 test_multi_port_queue_atomic_to_parallel(void)
1076 {
1077         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1078                                 RTE_SCHED_TYPE_PARALLEL);
1079 }
1080
1081 static int
1082 test_multi_port_queue_parallel_to_atomic(void)
1083 {
1084         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1085                                 RTE_SCHED_TYPE_ATOMIC);
1086 }
1087
1088 static int
1089 test_multi_port_queue_parallel_to_ordered(void)
1090 {
1091         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1092                                 RTE_SCHED_TYPE_ORDERED);
1093 }
1094
1095 static int
1096 test_multi_port_queue_parallel_to_parallel(void)
1097 {
1098         return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1099                                 RTE_SCHED_TYPE_PARALLEL);
1100 }
1101
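/*
 * 256-stage flow-based pipeline: each stage increments sub_event_type and
 * forwards the event with a random sched type; events retire once
 * sub_event_type reaches 255.
 */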
1102 static int
1103 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1104 {
1105         struct test_core_param *param = arg;
1106         struct rte_event ev;
1107         uint16_t valid_event;
1108         uint8_t port = param->port;
1109         rte_atomic32_t *total_events = param->total_events;
1110
1111         while (rte_atomic32_read(total_events) > 0) {
1112                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1113                 if (!valid_event)
1114                         continue;
1115
1116                 if (ev.sub_event_type == 255) { /* last stage */
1117                         rte_pktmbuf_free(ev.mbuf);
1118                         rte_atomic32_sub(total_events, 1);
1119                 } else {
1120                         ev.event_type = RTE_EVENT_TYPE_CPU;
1121                         ev.sub_event_type++;
1122                         ev.sched_type =
1123                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1124                         ev.op = RTE_EVENT_OP_FORWARD;
1125                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1126                 }
1127         }
1128         return 0;
1129 }
1130
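/* Inject MAX_EVENTS events at stage 0 and run the given max-stages worker on all ports */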
1131 static int
1132 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1133 {
1134         uint8_t nr_ports;
1135         int ret;
1136
1137         nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
1138
1139         if (!nr_ports) {
1140                 printf("%s: Not enough ports=%d or workers=%d\n", __func__,
1141                         rte_event_port_count(evdev), rte_lcore_count() - 1);
1142                 return TEST_SUCCESS;
1143         }
1144
1145         /* Inject events with m->seqn from 0 to total_events - 1 */
1146         ret = inject_events(
1147                 0x1 /*flow_id */,
1148                 RTE_EVENT_TYPE_CPU /* event_type */,
1149                 0 /* sub_event_type (stage 0) */,
1150                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1151                 0 /* queue */,
1152                 0 /* port */,
1153                 MAX_EVENTS /* events */);
1154         if (ret)
1155                 return TEST_FAILED;
1156
1157         return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1158                                          0xff /* invalid */);
1159 }
1160
1161 /* Flow based pipeline with maximum stages with random sched type */
1162 static int
1163 test_multi_port_flow_max_stages_random_sched_type(void)
1164 {
1165         return launch_multi_port_max_stages_random_sched_type(
1166                 worker_flow_based_pipeline_max_stages_rand_sched_type);
1167 }
1168
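/*
 * Queue-based maximum-stages pipeline: each stage advances the event to the
 * next queue with a random sched type; events retire at the last queue.
 */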
1169 static int
1170 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1171 {
1172         struct test_core_param *param = arg;
1173         struct rte_event ev;
1174         uint16_t valid_event;
1175         uint8_t port = param->port;
1176         uint8_t nr_queues = rte_event_queue_count(evdev);
1177         rte_atomic32_t *total_events = param->total_events;
1178
1179         while (rte_atomic32_read(total_events) > 0) {
1180                 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1181                 if (!valid_event)
1182                         continue;
1183
1184                 if (ev.queue_id == nr_queues - 1) { /* last stage */
1185                         rte_pktmbuf_free(ev.mbuf);
1186                         rte_atomic32_sub(total_events, 1);
1187                 } else {
1188                         ev.event_type = RTE_EVENT_TYPE_CPU;
1189                         ev.queue_id++;
1190                         ev.sched_type =
1191                                 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1192                         ev.op = RTE_EVENT_OP_FORWARD;
1193                         rte_event_enqueue_burst(evdev, port, &ev, 1);
1194                 }
1195         }
1196         return 0;
1197 }
1198
1199 /* Queue based pipeline with maximum stages with random sched type */
1200 static int
1201 test_multi_port_queue_max_stages_random_sched_type(void)
1202 {
1203         return launch_multi_port_max_stages_random_sched_type(
1204                 worker_queue_based_pipeline_max_stages_rand_sched_type);
1205 }

1206 static struct unit_test_suite eventdev_octeontx_testsuite = {
1207         .suite_name = "eventdev octeontx unit test suite",
1208         .setup = testsuite_setup,
1209         .teardown = testsuite_teardown,
1210         .unit_test_cases = {
1211                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1212                         test_simple_enqdeq_ordered),
1213                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1214                         test_simple_enqdeq_atomic),
1215                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1216                         test_simple_enqdeq_parallel),
1217                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1218                         test_multi_queue_enq_single_port_deq),
1219                 TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
1220                         test_multi_queue_priority),
1221                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1222                         test_multi_queue_enq_multi_port_deq),
1223                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1224                         test_queue_to_port_single_link),
1225                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1226                         test_queue_to_port_multi_link),
1227                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1228                         test_multi_port_flow_ordered_to_atomic),
1229                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1230                         test_multi_port_flow_ordered_to_ordered),
1231                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1232                         test_multi_port_flow_ordered_to_parallel),
1233                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1234                         test_multi_port_flow_atomic_to_atomic),
1235                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1236                         test_multi_port_flow_atomic_to_ordered),
1237                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1238                         test_multi_port_flow_atomic_to_parallel),
1239                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1240                         test_multi_port_flow_parallel_to_atomic),
1241                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1242                         test_multi_port_flow_parallel_to_ordered),
1243                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1244                         test_multi_port_flow_parallel_to_parallel),
1245                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1246                         test_multi_port_queue_ordered_to_atomic),
1247                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1248                         test_multi_port_queue_ordered_to_ordered),
1249                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1250                         test_multi_port_queue_ordered_to_parallel),
1251                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1252                         test_multi_port_queue_atomic_to_atomic),
1253                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1254                         test_multi_port_queue_atomic_to_ordered),
1255                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1256                         test_multi_port_queue_atomic_to_parallel),
1257                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1258                         test_multi_port_queue_parallel_to_atomic),
1259                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1260                         test_multi_port_queue_parallel_to_ordered),
1261                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1262                         test_multi_port_queue_parallel_to_parallel),
1263                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1264                         test_multi_port_flow_max_stages_random_sched_type),
1265                 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1266                         test_multi_port_queue_max_stages_random_sched_type),
1267                 TEST_CASES_END() /**< NULL terminate unit test array */
1268         }
1269 };
1270
1271 static int
1272 test_eventdev_octeontx(void)
1273 {
1274         return unit_test_suite_runner(&eventdev_octeontx_testsuite);
1275 }
1276
1277 REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);