/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>

#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"

#define MAX_PORTS 4
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  8
#define DPAA2_TEST_RUN(setup, teardown, test) \
        dpaa2_test_run(setup, teardown, test, #test)

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

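/* Per-event attributes stored in the mbuf data area at enqueue time so
 * that each dequeued event can be validated against what was injected.
 */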
struct event_attr {
        uint32_t flow_id;
        uint8_t event_type;
        uint8_t sub_event_type;
        uint8_t sched_type;
        uint8_t queue;
        uint8_t port;
        uint8_t seq;
};

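/* Parameters handed to each worker lcore launched by
 * launch_workers_and_wait().
 */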
struct test_core_param {
        rte_atomic32_t *total_events;
        uint64_t dequeue_tmo_ticks;
        uint8_t port;
        uint8_t sched_type;
};

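/* Look up the event_dpaa2 eventdev; create it as a vdev if not found. */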
static int
testsuite_setup(void)
{
        const char *eventdev_name = "event_dpaa2";

        evdev = rte_event_dev_get_dev_id(eventdev_name);
        if (evdev < 0) {
                dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
                                __LINE__, eventdev_name);
                if (rte_vdev_init(eventdev_name, NULL) < 0) {
                        dpaa2_evdev_err("Error creating eventdev %s",
                                        eventdev_name);
                        return -1;
                }
                evdev = rte_event_dev_get_dev_id(eventdev_name);
                if (evdev < 0) {
                        dpaa2_evdev_err("Error finding newly created eventdev");
                        return -1;
                }
        }

        return 0;
}

static void
testsuite_teardown(void)
{
        rte_event_dev_close(evdev);
}

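/* Fill a device configuration with the maximum capabilities reported by
 * rte_event_dev_info_get() and the minimum dequeue timeout.
 */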
static void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
                        struct rte_event_dev_info *info)
{
        memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
        dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
        dev_conf->nb_event_ports = info->max_event_ports;
        dev_conf->nb_event_queues = info->max_event_queues;
        dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
        dev_conf->nb_event_port_dequeue_depth =
                        info->max_event_port_dequeue_depth;
        dev_conf->nb_event_port_enqueue_depth =
                        info->max_event_port_enqueue_depth;
        dev_conf->nb_events_limit =
                        info->max_num_events;
}

enum {
        TEST_EVENTDEV_SETUP_DEFAULT,
        TEST_EVENTDEV_SETUP_PRIORITY,
        TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

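/* Common test fixture: create the test mempool, configure queues and ports
 * according to the requested mode, link every queue to every port and start
 * the device.
 */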
static int
_eventdev_setup(int mode)
{
        int i, ret;
        struct rte_event_dev_config dev_conf;
        struct rte_event_dev_info info;
        const char *pool_name = "evdev_dpaa2_test_pool";

        /* Create and destroy the pool for each test case to keep it
         * standalone.
         */
        eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
                                        MAX_EVENTS,
                                        0 /*MBUF_CACHE_SIZE*/,
                                        0,
                                        512, /* Use very small mbufs */
                                        rte_socket_id());
        if (!eventdev_test_mempool) {
                dpaa2_evdev_err("ERROR creating mempool");
                return -1;
        }

        ret = rte_event_dev_info_get(evdev, &info);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
        RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
                        "ERROR max_num_events=%d < max_events=%d",
                                info.max_num_events, MAX_EVENTS);

        devconf_set_default_sane_values(&dev_conf, &info);
        if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
                dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

        ret = rte_event_dev_configure(evdev, &dev_conf);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

        uint32_t queue_count;
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &queue_count), "Queue count get failed");

        if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
                if (queue_count > 8) {
                        dpaa2_evdev_err(
                                "test expects a unique priority per queue");
                        return -ENOTSUP;
                }

                /* Configure event queues (0 to n) with
                 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
                 * RTE_EVENT_DEV_PRIORITY_LOWEST.
                 */
                uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
                                queue_count;
                for (i = 0; i < (int)queue_count; i++) {
                        struct rte_event_queue_conf queue_conf;

                        ret = rte_event_queue_default_conf_get(evdev, i,
                                                &queue_conf);
                        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
                                        i);
                        queue_conf.priority = i * step;
                        ret = rte_event_queue_setup(evdev, i, &queue_conf);
                        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
                                        i);
                }

        } else {
                /* Configure event queues with default priority */
                for (i = 0; i < (int)queue_count; i++) {
                        ret = rte_event_queue_setup(evdev, i, NULL);
                        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
                                        i);
                }
        }
        /* Configure event ports */
        uint32_t port_count;
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &port_count), "Port count get failed");
        for (i = 0; i < (int)port_count; i++) {
                ret = rte_event_port_setup(evdev, i, NULL);
                RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
                ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
                RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
                                i);
        }

        ret = rte_event_dev_start(evdev);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

        return 0;
}

static int
eventdev_setup(void)
{
        return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static void
eventdev_teardown(void)
{
        rte_event_dev_stop(evdev);
        rte_mempool_free(eventdev_test_mempool);
}

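/* Record the injected attributes both in the mbuf data area (for later
 * validation) and in the rte_event to be enqueued.
 */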
static void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
                        uint32_t flow_id, uint8_t event_type,
                        uint8_t sub_event_type, uint8_t sched_type,
                        uint8_t queue, uint8_t port, uint8_t seq)
{
        struct event_attr *attr;

        /* Store the event attributes in mbuf for future reference */
        attr = rte_pktmbuf_mtod(m, struct event_attr *);
        attr->flow_id = flow_id;
        attr->event_type = event_type;
        attr->sub_event_type = sub_event_type;
        attr->sched_type = sched_type;
        attr->queue = queue;
        attr->port = port;
        attr->seq = seq;

        ev->flow_id = flow_id;
        ev->sub_event_type = sub_event_type;
        ev->event_type = event_type;
        /* Inject the new event */
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = sched_type;
        ev->queue_id = queue;
        ev->mbuf = m;
}

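/* Allocate mbufs and enqueue the requested number of NEW events, tagging
 * each one with a sequence number for validation on dequeue.
 */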
static int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
                uint8_t sched_type, uint8_t queue, uint8_t port,
                unsigned int events)
{
        struct rte_mbuf *m;
        unsigned int i;

        for (i = 0; i < events; i++) {
                struct rte_event ev = {.event = 0, .u64 = 0};

                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

                update_event_and_validation_attr(m, &ev, flow_id, event_type,
                        sub_event_type, sched_type, queue, port, i);
                rte_event_enqueue_burst(evdev, port, &ev, 1);
        }
        return 0;
}

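/* After a test has consumed everything it expects, make sure no stray
 * events are still pending on the port.
 */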
static int
check_excess_events(uint8_t port)
{
        int i;
        uint16_t valid_event;
        struct rte_event ev;

        /* Check for excess events: retry a few times, then exit */
        for (i = 0; i < 32; i++) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

                RTE_TEST_ASSERT_SUCCESS(valid_event,
                                "Unexpected valid event=%d", ev.mbuf->seqn);
        }
        return 0;
}

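/* Inject total_events single events with randomized flow, sub-event type,
 * schedule type and destination queue, all through port 0.
 */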
static int
generate_random_events(const unsigned int total_events)
{
        struct rte_event_dev_info info;
        unsigned int i;
        int ret;

        uint32_t queue_count;
        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &queue_count), "Queue count get failed");

        ret = rte_event_dev_info_get(evdev, &info);
        RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
        for (i = 0; i < total_events; i++) {
                ret = inject_events(
                        rte_rand() % info.max_event_queue_flows /* flow_id */,
                        RTE_EVENT_TYPE_CPU /* event_type */,
                        rte_rand() % 256 /* sub_event_type */,
                        rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
                        rte_rand() % queue_count /* queue */,
                        0 /* port */,
                        1 /* events */);
                if (ret)
                        return -1;
        }
        return ret;
}

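/* Compare the attributes stored in the mbuf at enqueue time against the
 * dequeued event.
 */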
static int
validate_event(struct rte_event *ev)
{
        struct event_attr *attr;

        attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
        RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
                        "flow_id mismatch enq=%d deq=%d",
                        attr->flow_id, ev->flow_id);
        RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
                        "event_type mismatch enq=%d deq=%d",
                        attr->event_type, ev->event_type);
        RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
                        "sub_event_type mismatch enq=%d deq=%d",
                        attr->sub_event_type, ev->sub_event_type);
        RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
                        "sched_type mismatch enq=%d deq=%d",
                        attr->sched_type, ev->sched_type);
        RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
                        "queue mismatch enq=%d deq=%d",
                        attr->queue, ev->queue_id);
        return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
                                 struct rte_event *ev);

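/* Dequeue total_events from the given port, validating each event and
 * optionally running a test-specific callback; more than UINT16_MAX empty
 * polls in a row is treated as a deadlock.
 */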
static int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
        int ret;
        uint16_t valid_event;
        uint32_t events = 0, forward_progress_cnt = 0, index = 0;
        struct rte_event ev;

        while (1) {
                if (++forward_progress_cnt > UINT16_MAX) {
                        dpaa2_evdev_err("Detected deadlock");
                        return -1;
                }

                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                forward_progress_cnt = 0;
                ret = validate_event(&ev);
                if (ret)
                        return -1;

                if (fn != NULL) {
                        ret = fn(index, port, &ev);
                        RTE_TEST_ASSERT_SUCCESS(ret,
                                "Failed to validate test specific event");
                }

                ++index;

                rte_pktmbuf_free(ev.mbuf);
                if (++events >= total_events)
                        break;
        }

        return check_excess_events(port);
}

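/* Events must come back in the order they were injected: the stored
 * sequence number has to match the dequeue index.
 */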
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
        struct event_attr *attr;

        attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);

        RTE_SET_USED(port);
        RTE_TEST_ASSERT_EQUAL(index, attr->seq,
                "index=%d != seqn=%d", index, attr->seq);
        return 0;
}

static int
test_simple_enqdeq(uint8_t sched_type)
{
        int ret;

        ret = inject_events(0 /* flow_id */,
                                RTE_EVENT_TYPE_CPU /* event_type */,
                                0 /* sub_event_type */,
                                sched_type,
                                0 /* queue */,
                                0 /* port */,
                                MAX_EVENTS);
        if (ret)
                return -1;

        return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_atomic(void)
{
        return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
        return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events and spread them across the
 * available queues. On dequeue, verify the enqueued event attributes
 * using a single event port (port 0).
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
        int ret;

        ret = generate_random_events(MAX_EVENTS);
        if (ret)
                return -1;

        return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

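/* Worker loop: drain events from this lcore's port, validate each one and
 * decrement the shared event count until it reaches zero.
 */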
static int
worker_multi_port_fn(void *arg)
{
        struct test_core_param *param = arg;
        struct rte_event ev;
        uint16_t valid_event;
        uint8_t port = param->port;
        rte_atomic32_t *total_events = param->total_events;
        int ret;

        while (rte_atomic32_read(total_events) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                ret = validate_event(&ev);
                RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
                rte_pktmbuf_free(ev.mbuf);
                rte_atomic32_sub(total_events, 1);
        }
        return 0;
}

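/* Poll until the last launched worker finishes, logging progress once a
 * second and failing if it has not finished within 10 seconds.
 */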
static int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
        uint64_t cycles, print_cycles;

        RTE_SET_USED(count);

        print_cycles = cycles = rte_get_timer_cycles();
        while (rte_eal_get_lcore_state(lcore) != FINISHED) {
                uint64_t new_cycles = rte_get_timer_cycles();

                if (new_cycles - print_cycles > rte_get_timer_hz()) {
                        dpaa2_evdev_dbg("\r%s: events %d", __func__,
                                rte_atomic32_read(count));
                        print_cycles = new_cycles;
                }
                if (new_cycles - cycles > rte_get_timer_hz() * 10) {
                        dpaa2_evdev_info(
                                "%s: No schedules for 10 seconds, deadlock (%d)",
                                __func__,
                                rte_atomic32_read(count));
                        rte_event_dev_dump(evdev, stdout);
                        cycles = new_cycles;
                        return -1;
                }
        }
        rte_eal_mp_wait_lcore();
        return 0;
}

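/* Launch main_worker on the first available lcore and the workers function
 * on the remaining nb_workers - 1 lcores, one event port per worker, then
 * wait for all of them to drain total_events.
 */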
static int
launch_workers_and_wait(int (*main_worker)(void *),
                        int (*workers)(void *), uint32_t total_events,
                        uint8_t nb_workers, uint8_t sched_type)
{
        uint8_t port = 0;
        int w_lcore;
        int ret;
        struct test_core_param *param;
        rte_atomic32_t atomic_total_events;
        uint64_t dequeue_tmo_ticks;

        if (!nb_workers)
                return 0;

        rte_atomic32_set(&atomic_total_events, total_events);
        RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);

        param = malloc(sizeof(struct test_core_param) * nb_workers);
        if (!param)
                return -1;

        ret = rte_event_dequeue_timeout_ticks(evdev,
                rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
        if (ret) {
                free(param);
                return -1;
        }

        param[0].total_events = &atomic_total_events;
        param[0].sched_type = sched_type;
        param[0].port = 0;
        param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
        rte_smp_wmb();

        w_lcore = rte_get_next_lcore(
                        /* start core */ -1,
                        /* skip main */ 1,
                        /* wrap */ 0);
        rte_eal_remote_launch(main_worker, &param[0], w_lcore);

        for (port = 1; port < nb_workers; port++) {
                param[port].total_events = &atomic_total_events;
                param[port].sched_type = sched_type;
                param[port].port = port;
                param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
                rte_smp_wmb();
                w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
                rte_eal_remote_launch(workers, &param[port], w_lcore);
        }

        ret = wait_workers_to_join(w_lcore, &atomic_total_events);
        free(param);
        return ret;
}

/*
 * Generate a prescribed number of events and spread them across the
 * available queues. Dequeue the events through multiple ports and verify
 * the enqueued event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
        const unsigned int total_events = MAX_EVENTS;
        uint32_t nr_ports;
        int ret;

        ret = generate_random_events(total_events);
        if (ret)
                return -1;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &nr_ports), "Port count get failed");
        nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

        if (!nr_ports) {
                dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
                                __func__, nr_ports, rte_lcore_count() - 1);
                return 0;
        }

        return launch_workers_and_wait(worker_multi_port_fn,
                                        worker_multi_port_fn, total_events,
                                        nr_ports, 0xff /* invalid */);
}

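/* Device-stop flush callback: count every RTE_EVENT_TYPE_CPU event that is
 * drained from the device when it is stopped.
 */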
static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
        unsigned int *count = arg;

        RTE_SET_USED(dev_id);
        if (event.event_type == RTE_EVENT_TYPE_CPU)
                *count = *count + 1;
}

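/* Inject events, register the flush callback, stop the device and verify
 * that the callback saw every injected event.
 */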
static int
test_dev_stop_flush(void)
{
        unsigned int total_events = MAX_EVENTS, count = 0;
        int ret;

        ret = generate_random_events(total_events);
        if (ret)
                return -1;

        ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
        if (ret)
                return -2;
        rte_event_dev_stop(evdev);
        ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
        if (ret)
                return -3;
        RTE_TEST_ASSERT_EQUAL(total_events, count,
                                "count mismatch total_events=%d count=%d",
                                total_events, count);
        return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
                        struct rte_event *ev)
{
        RTE_SET_USED(index);
        RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
                                "queue mismatch enq=%d deq=%d",
                                port, ev->queue_id);
        return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by verifying
 * queue_id == x on dequeue from the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
        int i, nr_links, ret;

        uint32_t port_count;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &port_count), "Port count get failed");

        /* Unlink all connections that were created in eventdev_setup */
        for (i = 0; i < (int)port_count; i++) {
                ret = rte_event_port_unlink(evdev, i, NULL, 0);
                RTE_TEST_ASSERT(ret >= 0,
                                "Failed to unlink all queues port=%d", i);
        }

        uint32_t queue_count;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &queue_count), "Queue count get failed");

        nr_links = RTE_MIN(port_count, queue_count);
        const unsigned int total_events = MAX_EVENTS / nr_links;

        /* Link queue x to port x and inject events to queue x through port x */
        for (i = 0; i < nr_links; i++) {
                uint8_t queue = (uint8_t)i;

                ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
                RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

                ret = inject_events(
                        0x100 /* flow_id */,
                        RTE_EVENT_TYPE_CPU /* event_type */,
                        rte_rand() % 256 /* sub_event_type */,
                        rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
                        queue /* queue */,
                        i /* port */,
                        total_events /* events */);
                if (ret)
                        return -1;
        }

        /* Verify the events were generated from the correct queue */
        for (i = 0; i < nr_links; i++) {
                ret = consume_events(i /* port */, total_events,
                                validate_queue_to_port_single_link);
                if (ret)
                        return -1;
        }

        return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
                        struct rte_event *ev)
{
        RTE_SET_USED(index);
        RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
                                "queue mismatch enq=%d deq=%d",
                                port, ev->queue_id);
        return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the link connections on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
        int ret, port0_events = 0, port1_events = 0;
        uint8_t queue, port;
        uint32_t nr_queues = 0;
        uint32_t nr_ports = 0;

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                            RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                            &nr_queues), "Queue count get failed");

        RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
                                RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                &nr_ports), "Port count get failed");

        if (nr_ports < 2) {
                dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
                                __func__, nr_ports);
                return 0;
        }

        /* Unlink all connections that were created in eventdev_setup */
        for (port = 0; port < nr_ports; port++) {
                ret = rte_event_port_unlink(evdev, port, NULL, 0);
                RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
                                        port);
        }

        const unsigned int total_events = MAX_EVENTS / nr_queues;

        /* Link all even-numbered queues to port 0 and odd-numbered queues
         * to port 1.
         */
        for (queue = 0; queue < nr_queues; queue++) {
                port = queue & 0x1;
                ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
                RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
                                        queue, port);

                ret = inject_events(
                        0x100 /* flow_id */,
                        RTE_EVENT_TYPE_CPU /* event_type */,
                        rte_rand() % 256 /* sub_event_type */,
                        rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
                        queue /* queue */,
                        port /* port */,
                        total_events /* events */);
                if (ret)
                        return -1;

                if (port == 0)
                        port0_events += total_events;
                else
                        port1_events += total_events;
        }

        ret = consume_events(0 /* port */, port0_events,
                                validate_queue_to_port_multi_link);
        if (ret)
                return -1;
        ret = consume_events(1 /* port */, port1_events,
                                validate_queue_to_port_multi_link);
        if (ret)
                return -1;

        return 0;
}

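/* Run a single test case: count it as unsupported if setup fails, otherwise
 * record pass/fail, and always run the teardown.
 */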
static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
                int (*test)(void), const char *name)
{
        if (setup() < 0) {
                RTE_LOG(INFO, PMD, "Error setting up test %s\n", name);
                unsupported++;
        } else {
                if (test() < 0) {
                        failed++;
                        RTE_LOG(INFO, PMD, "%s Failed\n", name);
                } else {
                        passed++;
                        RTE_LOG(INFO, PMD, "%s Passed\n", name);
                }
        }

        total++;
        tdown();
}

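/* Self-test entry point: run every test case with its setup/teardown and
 * report the summary counts.
 */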
int
test_eventdev_dpaa2(void)
{
        testsuite_setup();

        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_simple_enqdeq_atomic);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_simple_enqdeq_parallel);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_multi_queue_enq_single_port_deq);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_dev_stop_flush);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_multi_queue_enq_multi_port_deq);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_queue_to_port_single_link);
        DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_queue_to_port_multi_link);

        DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
        DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
        DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
        DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);

        testsuite_teardown();

        if (failed)
                return -1;

        return 0;
}