/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>

#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"

#define MAX_PORTS 4
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  8
#define DPAA2_TEST_RUN(setup, teardown, test) \
        dpaa2_test_run(setup, teardown, test, #test)

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

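/* Event attributes stashed in the mbuf data area on enqueue so the
 * dequeue side can validate what it receives.
 */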
struct event_attr {
        uint32_t flow_id;
        uint8_t event_type;
        uint8_t sub_event_type;
        uint8_t sched_type;
        uint8_t queue;
        uint8_t port;
        uint8_t seq;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

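/* Reset the global sequence-number list before launching workers. */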
static void
seqn_list_init(void)
{
        RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
        memset(seqn_list, 0, sizeof(seqn_list));
        seqn_list_index = 0;
}

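/* Per-worker parameters handed to the lcores launched by
 * launch_workers_and_wait().
 */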
struct test_core_param {
        rte_atomic32_t *total_events;
        uint64_t dequeue_tmo_ticks;
        uint8_t port;
        uint8_t sched_type;
};

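/* Locate the event_dpaa2 device; if it has not been probed yet, create
 * it as a virtual device.
 */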
static int
testsuite_setup(void)
{
        const char *eventdev_name = "event_dpaa2";

        evdev = rte_event_dev_get_dev_id(eventdev_name);
        if (evdev < 0) {
                dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
                                __LINE__, eventdev_name);
                if (rte_vdev_init(eventdev_name, NULL) < 0) {
                        dpaa2_evdev_err("Error creating eventdev %s",
                                        eventdev_name);
                        return -1;
                }
                evdev = rte_event_dev_get_dev_id(eventdev_name);
                if (evdev < 0) {
                        dpaa2_evdev_err("Error finding newly created eventdev");
                        return -1;
                }
        }

        return 0;
}

static void
testsuite_teardown(void)
{
        rte_event_dev_close(evdev);
}

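/* Fill a device configuration with the maximum capabilities reported by
 * the driver and the minimum dequeue timeout.
 */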
static void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
                        struct rte_event_dev_info *info)
{
        memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
        dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
        dev_conf->nb_event_ports = info->max_event_ports;
        dev_conf->nb_event_queues = info->max_event_queues;
        dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
        dev_conf->nb_event_port_dequeue_depth =
                        info->max_event_port_dequeue_depth;
        dev_conf->nb_event_port_enqueue_depth =
                        info->max_event_port_enqueue_depth;
        dev_conf->nb_events_limit =
                        info->max_num_events;
}

enum {
        TEST_EVENTDEV_SETUP_DEFAULT,
        TEST_EVENTDEV_SETUP_PRIORITY,
        TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

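/* Common test fixture: create a mempool, configure the device to its
 * maximums, set up all queues (optionally with unique priorities or a
 * per-dequeue timeout), link every queue to every port and start the
 * device.
 */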
static int
_eventdev_setup(int mode)
{
        int i, ret;
        struct rte_event_dev_config dev_conf;
        struct rte_event_dev_info info;
        const char *pool_name = "evdev_dpaa2_test_pool";

        /* Create and destroy the pool for each test case to make it standalone */
        eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
                                        MAX_EVENTS,
                                        0 /* MBUF_CACHE_SIZE */,
                                        0,
                                        512, /* Use very small mbufs */
                                        rte_socket_id());
        if (!eventdev_test_mempool) {
                dpaa2_evdev_err("ERROR creating mempool");
                return -1;
        }

        ret = rte_event_dev_info_get(evdev, &info);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
        RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
                        "ERROR max_num_events=%d < max_events=%d",
                                info.max_num_events, MAX_EVENTS);

        devconf_set_default_sane_values(&dev_conf, &info);
        if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
                dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

        ret = rte_event_dev_configure(evdev, &dev_conf);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

        uint32_t queue_count;
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &queue_count), "Queue count get failed");

        if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
                if (queue_count > 8) {
                        dpaa2_evdev_err(
                                "test expects a unique priority per queue");
                        return -ENOTSUP;
                }

                /* Configure event queues (0 to n) with
                 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
                 * RTE_EVENT_DEV_PRIORITY_LOWEST
                 */
                uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
                                queue_count;
                for (i = 0; i < (int)queue_count; i++) {
                        struct rte_event_queue_conf queue_conf;

                        ret = rte_event_queue_default_conf_get(evdev, i,
                                                &queue_conf);
                        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
                                        i);
                        queue_conf.priority = i * step;
                        ret = rte_event_queue_setup(evdev, i, &queue_conf);
                        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
                                        i);
                }

        } else {
                /* Configure event queues with default priority */
                for (i = 0; i < (int)queue_count; i++) {
                        ret = rte_event_queue_setup(evdev, i, NULL);
                        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
                                        i);
                }
        }
        /* Configure event ports */
        uint32_t port_count;
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &port_count), "Port count get failed");
        for (i = 0; i < (int)port_count; i++) {
                ret = rte_event_port_setup(evdev, i, NULL);
                RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
                ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
                RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
                                i);
        }

        ret = rte_event_dev_start(evdev);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

        return 0;
}

static int
eventdev_setup(void)
{
        return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static void
eventdev_teardown(void)
{
        rte_event_dev_stop(evdev);
        rte_mempool_free(eventdev_test_mempool);
}

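/* Record the enqueue-time attributes in both the mbuf payload and the
 * event itself, so validate_event() can compare the two after dequeue.
 */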
static void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
                        uint32_t flow_id, uint8_t event_type,
                        uint8_t sub_event_type, uint8_t sched_type,
                        uint8_t queue, uint8_t port, uint8_t seq)
{
        struct event_attr *attr;

        /* Store the event attributes in mbuf for future reference */
        attr = rte_pktmbuf_mtod(m, struct event_attr *);
        attr->flow_id = flow_id;
        attr->event_type = event_type;
        attr->sub_event_type = sub_event_type;
        attr->sched_type = sched_type;
        attr->queue = queue;
        attr->port = port;
        attr->seq = seq;

        ev->flow_id = flow_id;
        ev->sub_event_type = sub_event_type;
        ev->event_type = event_type;
        /* Inject the new event */
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = sched_type;
        ev->queue_id = queue;
        ev->mbuf = m;
}

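/* Allocate 'events' mbufs, tag each with the given attributes and a
 * running sequence number, and enqueue them one at a time through the
 * given port.
 */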
static int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
                uint8_t sched_type, uint8_t queue, uint8_t port,
                unsigned int events)
{
        struct rte_mbuf *m;
        unsigned int i;

        for (i = 0; i < events; i++) {
                struct rte_event ev = {.event = 0, .u64 = 0};

                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

                update_event_and_validation_attr(m, &ev, flow_id, event_type,
                        sub_event_type, sched_type, queue, port, i);
                rte_event_enqueue_burst(evdev, port, &ev, 1);
        }
        return 0;
}

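/* After a test has drained its expected events, poll the port a few more
 * times and fail if anything else shows up.
 */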
static int
check_excess_events(uint8_t port)
{
        int i;
        uint16_t valid_event;
        struct rte_event ev;

        /* Check for excess events, retry a few times and exit */
        for (i = 0; i < 32; i++) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

                RTE_TEST_ASSERT_SUCCESS(valid_event,
                                "Unexpected valid event=%d", ev.mbuf->seqn);
        }
        return 0;
}

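/* Enqueue 'total_events' events one by one, each with a random flow,
 * sub-event type, schedule type and destination queue, all through port 0.
 */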
static int
generate_random_events(const unsigned int total_events)
{
        struct rte_event_dev_info info;
        unsigned int i;
        int ret;

        uint32_t queue_count;
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &queue_count), "Queue count get failed");

        ret = rte_event_dev_info_get(evdev, &info);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
        for (i = 0; i < total_events; i++) {
                ret = inject_events(
                        rte_rand() % info.max_event_queue_flows /* flow_id */,
                        RTE_EVENT_TYPE_CPU /* event_type */,
                        rte_rand() % 256 /* sub_event_type */,
                        rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
                        rte_rand() % queue_count /* queue */,
                        0 /* port */,
                        1 /* events */);
                if (ret)
                        return -1;
        }
        return ret;
}
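/* Compare the attributes stored in the mbuf at enqueue time with what the
 * event carries on dequeue.
 */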
static int
validate_event(struct rte_event *ev)
{
        struct event_attr *attr;

        attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
        RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
                        "flow_id mismatch enq=%d deq=%d",
                        attr->flow_id, ev->flow_id);
        RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
                        "event_type mismatch enq=%d deq=%d",
                        attr->event_type, ev->event_type);
        RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
                        "sub_event_type mismatch enq=%d deq=%d",
                        attr->sub_event_type, ev->sub_event_type);
        RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
                        "sched_type mismatch enq=%d deq=%d",
                        attr->sched_type, ev->sched_type);
        RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
                        "queue mismatch enq=%d deq=%d",
                        attr->queue, ev->queue_id);
        return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
                                 struct rte_event *ev);

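/* Dequeue 'total_events' events from the given port, validating each one
 * (plus an optional test-specific callback) and detecting deadlock when
 * no forward progress is made.
 */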
static int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
        int ret;
        uint16_t valid_event;
        uint32_t events = 0, forward_progress_cnt = 0, index = 0;
        struct rte_event ev;

        while (1) {
                if (++forward_progress_cnt > UINT16_MAX) {
                        dpaa2_evdev_err("Detected deadlock");
                        return -1;
                }

                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                forward_progress_cnt = 0;
                ret = validate_event(&ev);
                if (ret)
                        return -1;

                if (fn != NULL) {
                        ret = fn(index, port, &ev);
                        RTE_TEST_ASSERT_SUCCESS(ret,
                                "Failed to validate test specific event");
                }

                ++index;

                rte_pktmbuf_free(ev.mbuf);
                if (++events >= total_events)
                        break;
        }

        return check_excess_events(port);
}

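/* Per-event callback: the dequeue order must match the enqueue order. */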
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
        struct event_attr *attr;

        attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);

        RTE_SET_USED(port);
        RTE_TEST_ASSERT_EQUAL(index, attr->seq,
                "index=%d != seqn=%d", index, attr->seq);
        return 0;
}

static int
test_simple_enqdeq(uint8_t sched_type)
{
        int ret;

        ret = inject_events(0 /* flow_id */,
                                RTE_EVENT_TYPE_CPU /* event_type */,
                                0 /* sub_event_type */,
                                sched_type,
                                0 /* queue */,
                                0 /* port */,
                                MAX_EVENTS);
        if (ret)
                return -1;

        return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_atomic(void)
{
        return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
        return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events and spread them across the available
 * queues. On dequeue, verify the enqueued event attributes using a single
 * event port (port 0).
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
        int ret;

        ret = generate_random_events(MAX_EVENTS);
        if (ret)
                return -1;

        return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

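/* Worker loop: keep dequeuing and validating events on this lcore's port
 * until the shared event counter drops to zero.
 */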
static int
worker_multi_port_fn(void *arg)
{
        struct test_core_param *param = arg;
        struct rte_event ev;
        uint16_t valid_event;
        uint8_t port = param->port;
        rte_atomic32_t *total_events = param->total_events;
        int ret;

        while (rte_atomic32_read(total_events) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                ret = validate_event(&ev);
                RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
                rte_pktmbuf_free(ev.mbuf);
                rte_atomic32_sub(total_events, 1);
        }
        return 0;
}

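/* Busy-wait for the last launched worker to finish, dumping device state
 * and bailing out if no progress is seen for ten seconds.
 */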
static int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
        uint64_t cycles, print_cycles;

        RTE_SET_USED(count);

        print_cycles = cycles = rte_get_timer_cycles();
        while (rte_eal_get_lcore_state(lcore) != FINISHED) {
                uint64_t new_cycles = rte_get_timer_cycles();

                if (new_cycles - print_cycles > rte_get_timer_hz()) {
                        dpaa2_evdev_dbg("\r%s: events %d", __func__,
                                rte_atomic32_read(count));
                        print_cycles = new_cycles;
                }
                if (new_cycles - cycles > rte_get_timer_hz() * 10) {
                        dpaa2_evdev_info(
                                "%s: No schedules for 10 seconds, deadlock (%d)",
                                __func__,
                                rte_atomic32_read(count));
                        rte_event_dev_dump(evdev, stdout);
                        cycles = new_cycles;
                        return -1;
                }
        }
        rte_eal_mp_wait_lcore();
        return 0;
}
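/* Launch one master worker plus nb_workers-1 slave workers on the next
 * available lcores, each bound to its own event port, then wait for them
 * to drain total_events.
 */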
static int
launch_workers_and_wait(int (*master_worker)(void *),
                        int (*slave_workers)(void *), uint32_t total_events,
                        uint8_t nb_workers, uint8_t sched_type)
{
        uint8_t port = 0;
        int w_lcore;
        int ret;
        struct test_core_param *param;
        rte_atomic32_t atomic_total_events;
        uint64_t dequeue_tmo_ticks;

        if (!nb_workers)
                return 0;

        rte_atomic32_set(&atomic_total_events, total_events);
        seqn_list_init();

        param = malloc(sizeof(struct test_core_param) * nb_workers);
        if (!param)
                return -1;

        ret = rte_event_dequeue_timeout_ticks(evdev,
                rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
        if (ret) {
                free(param);
                return -1;
        }

        param[0].total_events = &atomic_total_events;
        param[0].sched_type = sched_type;
        param[0].port = 0;
        param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
        rte_smp_wmb();

        w_lcore = rte_get_next_lcore(
                        /* start core */ -1,
                        /* skip master */ 1,
                        /* wrap */ 0);
        rte_eal_remote_launch(master_worker, &param[0], w_lcore);

        for (port = 1; port < nb_workers; port++) {
                param[port].total_events = &atomic_total_events;
                param[port].sched_type = sched_type;
                param[port].port = port;
                param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
                rte_smp_wmb();
                w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
                rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
        }

        ret = wait_workers_to_join(w_lcore, &atomic_total_events);
        free(param);
        return ret;
}

/*
 * Generate a prescribed number of events and spread them across the available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
        const unsigned int total_events = MAX_EVENTS;
        uint32_t nr_ports;
        int ret;

        ret = generate_random_events(total_events);
        if (ret)
                return -1;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &nr_ports), "Port count get failed");
        nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

        if (!nr_ports) {
                dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
                                __func__, nr_ports, rte_lcore_count() - 1);
                return 0;
        }

        return launch_workers_and_wait(worker_multi_port_fn,
                                        worker_multi_port_fn, total_events,
                                        nr_ports, 0xff /* invalid */);
}

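/* Device-stop flush callback: count every in-flight CPU event that the
 * driver flushes out when the device is stopped.
 */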
static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
        unsigned int *count = arg;

        RTE_SET_USED(dev_id);
        if (event.event_type == RTE_EVENT_TYPE_CPU)
                *count = *count + 1;
}

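/* Stop the device with events still queued and check that the registered
 * flush callback saw every one of them.
 */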
static int
test_dev_stop_flush(void)
{
        unsigned int total_events = MAX_EVENTS, count = 0;
        int ret;

        ret = generate_random_events(total_events);
        if (ret)
                return -1;

        ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
        if (ret)
                return -2;
        rte_event_dev_stop(evdev);
        ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
        if (ret)
                return -3;
        RTE_TEST_ASSERT_EQUAL(total_events, count,
                                "count mismatch total_events=%d count=%d",
                                total_events, count);
        return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
                        struct rte_event *ev)
{
        RTE_SET_USED(index);
        RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
                                "queue mismatch enq=%d deq=%d",
                                port, ev->queue_id);
        return 0;
}

/*
 * Link queue x to port x and verify the link by checking queue_id == x on
 * dequeue from port x.
 */
static int
test_queue_to_port_single_link(void)
{
        int i, nr_links, ret;

        uint32_t port_count;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &port_count), "Port count get failed");

        /* Unlink all connections that were created in eventdev_setup */
        for (i = 0; i < (int)port_count; i++) {
                ret = rte_event_port_unlink(evdev, i, NULL, 0);
                RTE_TEST_ASSERT(ret >= 0,
                                "Failed to unlink all queues port=%d", i);
        }

        uint32_t queue_count;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &queue_count), "Queue count get failed");

        nr_links = RTE_MIN(port_count, queue_count);
        const unsigned int total_events = MAX_EVENTS / nr_links;

        /* Link queue x to port x and inject events to queue x through port x */
        for (i = 0; i < nr_links; i++) {
                uint8_t queue = (uint8_t)i;

                ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
                RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

                ret = inject_events(
                        0x100 /* flow_id */,
                        RTE_EVENT_TYPE_CPU /* event_type */,
                        rte_rand() % 256 /* sub_event_type */,
                        rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
                        queue /* queue */,
                        i /* port */,
                        total_events /* events */);
                if (ret)
                        return -1;
        }

        /* Verify the events were generated from the correct queue */
        for (i = 0; i < nr_links; i++) {
                ret = consume_events(i /* port */, total_events,
                                validate_queue_to_port_single_link);
                if (ret)
                        return -1;
        }

        return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
                        struct rte_event *ev)
{
        RTE_SET_USED(index);
        RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
                                "queue mismatch enq=%d deq=%d",
                                port, ev->queue_id);
        return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the link connections on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
        int ret, port0_events = 0, port1_events = 0;
        uint8_t queue, port;
        uint32_t nr_queues = 0;
        uint32_t nr_ports = 0;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &nr_queues), "Queue count get failed");
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &nr_ports), "Port count get failed");

        if (nr_ports < 2) {
                dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
                                __func__, nr_ports);
                return 0;
        }

        /* Unlink all connections that were created in eventdev_setup */
        for (port = 0; port < nr_ports; port++) {
                ret = rte_event_port_unlink(evdev, port, NULL, 0);
                RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
                                        port);
        }

        const unsigned int total_events = MAX_EVENTS / nr_queues;

        /* Link all even-numbered queues to port 0 and odd-numbered to port 1 */
        for (queue = 0; queue < nr_queues; queue++) {
                port = queue & 0x1;
                ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
                RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
                                        queue, port);

                ret = inject_events(
                        0x100 /* flow_id */,
                        RTE_EVENT_TYPE_CPU /* event_type */,
                        rte_rand() % 256 /* sub_event_type */,
                        rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
                        queue /* queue */,
                        port /* port */,
                        total_events /* events */);
                if (ret)
                        return -1;

                if (port == 0)
                        port0_events += total_events;
                else
                        port1_events += total_events;
        }

        ret = consume_events(0 /* port */, port0_events,
                                validate_queue_to_port_multi_link);
        if (ret)
                return -1;
        ret = consume_events(1 /* port */, port1_events,
                                validate_queue_to_port_multi_link);
        if (ret)
                return -1;

        return 0;
}

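/* Run one test case: set up the fixture, execute the test, record the
 * result and always run the teardown.
 */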
static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
                int (*test)(void), const char *name)
{
        if (setup() < 0) {
                RTE_LOG(INFO, PMD, "Error setting up test %s\n", name);
                unsupported++;
        } else {
                if (test() < 0) {
                        failed++;
                        RTE_LOG(INFO, PMD, "%s Failed\n", name);
                } else {
                        passed++;
                        RTE_LOG(INFO, PMD, "%s Passed\n", name);
                }
        }

        total++;
        tdown();
}

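/* Self-test entry point: run every test case and print a summary. */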
int
test_eventdev_dpaa2(void)
{
        testsuite_setup();

        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_simple_enqdeq_atomic);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_simple_enqdeq_parallel);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_multi_queue_enq_single_port_deq);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_dev_stop_flush);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_multi_queue_enq_multi_port_deq);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_queue_to_port_single_link);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_queue_to_port_multi_link);

        DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
        DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
        DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
        DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);

        testsuite_teardown();

        if (failed)
                return -1;

        return 0;
}