1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2019 NXP
5 #include <rte_atomic.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_hexdump.h>
14 #include <rte_malloc.h>
15 #include <rte_memcpy.h>
16 #include <rte_launch.h>
17 #include <rte_lcore.h>
18 #include <rte_per_lcore.h>
19 #include <rte_random.h>
20 #include <rte_bus_vdev.h>
23 #include "dpaa2_eventdev.h"
24 #include "dpaa2_eventdev_logs.h"
27 #define NUM_PACKETS (1 << 18)
/* Run one test case: invokes dpaa2_test_run() with the given setup,
 * teardown and test callbacks, passing the stringified test function
 * name for result reporting. */
29 #define DPAA2_TEST_RUN(setup, teardown, test) \
30 dpaa2_test_run(setup, teardown, test, #test)
/* Suite-wide state.  NOTE(review): this listing is elided -- sibling
 * counters (total/passed/failed) and the evdev id declaration appear
 * to be missing between these lines; confirm against the full file. */
35 static int unsupported;
38 static struct rte_mempool *eventdev_test_mempool;
/* Fragment of struct event_attr: per-event attributes stored in the
 * mbuf data area at enqueue time and checked again at dequeue. */
43 uint8_t sub_event_type;
/* Parameters handed to each worker lcore launched by
 * launch_workers_and_wait(). */
50 struct test_core_param {
51 rte_atomic32_t *total_events;
52 uint64_t dequeue_tmo_ticks;
/* testsuite_setup() body fragment: resolve the "event_dpaa2" device id;
 * if the PMD is not probed, create it through rte_vdev_init() and look
 * it up again.  NOTE(review): the function signature and several error
 * paths are elided from this listing. */
60 const char *eventdev_name = "event_dpaa2";
62 evdev = rte_event_dev_get_dev_id(eventdev_name);
64 dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
65 __LINE__, eventdev_name);
/* Device not present: instantiate the vdev on the fly. */
66 if (rte_vdev_init(eventdev_name, NULL) < 0) {
67 dpaa2_evdev_err("Error creating eventdev %s",
71 evdev = rte_event_dev_get_dev_id(eventdev_name);
73 dpaa2_evdev_err("Error finding newly created eventdev");
/* Close the event device once the whole suite has finished. */
82 testsuite_teardown(void)
84 rte_event_dev_close(evdev);
/*
 * Fill *dev_conf with the most permissive configuration the device
 * advertises in *info: maximum queues, ports, flows, enqueue/dequeue
 * depths and event limit, with the minimum dequeue timeout.  Used by
 * every per-test setup path before rte_event_dev_configure().
 */
88 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
89 struct rte_event_dev_info *info)
91 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
92 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
93 dev_conf->nb_event_ports = info->max_event_ports;
94 dev_conf->nb_event_queues = info->max_event_queues;
95 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
96 dev_conf->nb_event_port_dequeue_depth =
97 info->max_event_port_dequeue_depth;
98 dev_conf->nb_event_port_enqueue_depth =
99 info->max_event_port_enqueue_depth;
/* Fix: the original repeated the nb_event_port_enqueue_depth
 * assignment a second time (orig. lines 100-101); the redundant
 * duplicate has been removed. */
102 dev_conf->nb_events_limit =
103 info->max_num_events;
/* Modes selecting how _eventdev_setup() configures queues/device:
 * default priorities, unique per-queue priorities, or per-dequeue
 * timeout enabled. */
107 TEST_EVENTDEV_SETUP_DEFAULT,
108 TEST_EVENTDEV_SETUP_PRIORITY,
109 TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
/* Common per-test setup: create the test mempool, configure the device
 * with sane maxima, set up all queues (optionally with unique
 * priorities), set up and link all ports, then start the device.
 * NOTE(review): several lines (mempool size arguments, error returns,
 * loop bodies) are elided from this listing. */
113 _eventdev_setup(int mode)
116 struct rte_event_dev_config dev_conf;
117 struct rte_event_dev_info info;
118 const char *pool_name = "evdev_dpaa2_test_pool";
120 /* Create and destroy pool for each test case to make it standalone */
121 eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
123 0 /*MBUF_CACHE_SIZE*/,
125 512, /* Use very small mbufs */
127 if (!eventdev_test_mempool) {
128 dpaa2_evdev_err("ERROR creating mempool");
/* Sanity: device must be able to hold at least MAX_EVENTS in flight. */
132 ret = rte_event_dev_info_get(evdev, &info);
133 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
134 RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
135 "ERROR max_num_events=%d < max_events=%d",
136 info.max_num_events, MAX_EVENTS);
138 devconf_set_default_sane_values(&dev_conf, &info);
139 if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
140 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
142 ret = rte_event_dev_configure(evdev, &dev_conf);
143 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
145 uint32_t queue_count;
146 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
147 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
148 &queue_count), "Queue count get failed");
/* Priority mode: give each queue its own priority step. */
150 if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
151 if (queue_count > 8) {
153 "test expects the unique priority per queue");
157 /* Configure event queues(0 to n) with
158 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
159 * RTE_EVENT_DEV_PRIORITY_LOWEST
161 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
163 for (i = 0; i < (int)queue_count; i++) {
164 struct rte_event_queue_conf queue_conf;
166 ret = rte_event_queue_default_conf_get(evdev, i,
168 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
170 queue_conf.priority = i * step;
171 ret = rte_event_queue_setup(evdev, i, &queue_conf);
172 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
177 /* Configure event queues with default priority */
178 for (i = 0; i < (int)queue_count; i++) {
179 ret = rte_event_queue_setup(evdev, i, NULL);
180 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
184 /* Configure event ports */
186 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
187 RTE_EVENT_DEV_ATTR_PORT_COUNT,
188 &port_count), "Port count get failed");
/* NULL/NULL link request links every queue to the port. */
189 for (i = 0; i < (int)port_count; i++) {
190 ret = rte_event_port_setup(evdev, i, NULL);
191 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
192 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
193 RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
197 ret = rte_event_dev_start(evdev);
198 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
/* Default-mode setup wrapper used by most DPAA2_TEST_RUN invocations. */
206 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
/* Per-test teardown: stop the device and free the standalone test
 * mempool created in _eventdev_setup(). */
210 eventdev_teardown(void)
212 rte_event_dev_stop(evdev);
213 rte_mempool_free(eventdev_test_mempool);
/* Prepare one event for injection: stash the attributes in the mbuf
 * data area (struct event_attr) so validate_event() can compare them
 * after dequeue, and fill the rte_event accordingly with op=NEW.
 * NOTE(review): the queue/port/seq attr stores are elided here. */
217 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
218 uint32_t flow_id, uint8_t event_type,
219 uint8_t sub_event_type, uint8_t sched_type,
220 uint8_t queue, uint8_t port, uint8_t seq)
222 struct event_attr *attr;
224 /* Store the event attributes in mbuf for future reference */
225 attr = rte_pktmbuf_mtod(m, struct event_attr *);
226 attr->flow_id = flow_id;
227 attr->event_type = event_type;
228 attr->sub_event_type = sub_event_type;
229 attr->sched_type = sched_type;
/* Mirror the same attributes into the event itself. */
234 ev->flow_id = flow_id;
235 ev->sub_event_type = sub_event_type;
236 ev->event_type = event_type;
237 /* Inject the new event */
238 ev->op = RTE_EVENT_OP_NEW;
239 ev->sched_type = sched_type;
240 ev->queue_id = queue;
/* Allocate mbufs from the test mempool, tag each with validation
 * attributes (seq = loop index) and enqueue them one-by-one on the
 * given port.  NOTE(review): the `events` count parameter and return
 * are elided from this listing. */
245 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
246 uint8_t sched_type, uint8_t queue, uint8_t port,
252 for (i = 0; i < events; i++) {
253 struct rte_event ev = {.event = 0, .u64 = 0};
255 m = rte_pktmbuf_alloc(eventdev_test_mempool);
256 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
258 update_event_and_validation_attr(m, &ev, flow_id, event_type,
259 sub_event_type, sched_type, queue, port, i);
260 rte_event_enqueue_burst(evdev, port, &ev, 1);
/* After a test has consumed its expected events, poll the port a few
 * more times and assert nothing unexpected is still queued. */
266 check_excess_events(uint8_t port)
269 uint16_t valid_event;
272 /* Check for excess events, try for a few times and exit */
273 for (i = 0; i < 32; i++) {
274 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
/* Any valid dequeue here is a leftover event -- fail. */
276 RTE_TEST_ASSERT_SUCCESS(valid_event,
277 "Unexpected valid event=%d", ev.mbuf->seqn);
/* Inject `total_events` events with randomized flow, sub-event type,
 * schedule type and destination queue, spreading load across all
 * configured queues.  NOTE(review): the inject_events() call header
 * and trailing arguments are elided from this listing. */
283 generate_random_events(const unsigned int total_events)
285 struct rte_event_dev_info info;
289 uint32_t queue_count;
290 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
291 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
292 &queue_count), "Queue count get failed");
294 ret = rte_event_dev_info_get(evdev, &info);
295 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
296 for (i = 0; i < total_events; i++) {
298 rte_rand() % info.max_event_queue_flows /*flow_id */,
299 RTE_EVENT_TYPE_CPU /* event_type */,
300 rte_rand() % 256 /* sub_event_type */,
301 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
302 rte_rand() % queue_count /* queue */,
/* Compare a dequeued event against the attributes recorded in its mbuf
 * at injection time; any mismatch fails the test. */
313 validate_event(struct rte_event *ev)
315 struct event_attr *attr;
317 attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
318 RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
319 "flow_id mismatch enq=%d deq =%d",
320 attr->flow_id, ev->flow_id);
321 RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
322 "event_type mismatch enq=%d deq =%d",
323 attr->event_type, ev->event_type);
324 RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
325 "sub_event_type mismatch enq=%d deq =%d",
326 attr->sub_event_type, ev->sub_event_type);
327 RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
328 "sched_type mismatch enq=%d deq =%d",
329 attr->sched_type, ev->sched_type);
330 RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
331 "queue mismatch enq=%d deq =%d",
332 attr->queue, ev->queue_id);
/* Optional per-test validation hook used by consume_events(). */
336 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
337 struct rte_event *ev);
/* Dequeue `total_events` events from `port`, validating each generic
 * attribute set and, if `fn` is non-NULL, the test-specific callback.
 * Aborts if UINT16_MAX consecutive empty polls occur (deadlock guard),
 * then checks no excess events remain.  NOTE(review): the empty-poll
 * `continue` path and fn!=NULL guard are elided from this listing. */
340 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
343 uint16_t valid_event;
344 uint32_t events = 0, forward_progress_cnt = 0, index = 0;
/* Deadlock detection: too many polls without a valid event. */
348 if (++forward_progress_cnt > UINT16_MAX) {
349 dpaa2_evdev_err("Detected deadlock");
353 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
/* Got an event: reset the progress counter and validate it. */
357 forward_progress_cnt = 0;
358 ret = validate_event(&ev);
363 ret = fn(index, port, &ev);
364 RTE_TEST_ASSERT_SUCCESS(ret,
365 "Failed to validate test specific event");
370 rte_pktmbuf_free(ev.mbuf);
371 if (++events >= total_events)
375 return check_excess_events(port);
/* Single-port enq/deq validation: events must come back in the exact
 * order they were injected (dequeue index == stored sequence number). */
379 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
381 struct event_attr *attr;
383 attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
386 RTE_TEST_ASSERT_EQUAL(index, attr->seq,
387 "index=%d != seqn=%d", index, attr->seq);
/* Inject MAX_EVENTS on one flow/queue/port with the given schedule
 * type, then consume them back on port 0 verifying in-order delivery.
 * NOTE(review): the remaining inject_events() arguments are elided. */
392 test_simple_enqdeq(uint8_t sched_type)
396 ret = inject_events(0 /*flow_id */,
397 RTE_EVENT_TYPE_CPU /* event_type */,
398 0 /* sub_event_type */,
406 return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
/* Atomic-schedule variant of the simple enqueue/dequeue test. */
410 test_simple_enqdeq_atomic(void)
412 return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
/* Parallel-schedule variant of the simple enqueue/dequeue test. */
416 test_simple_enqdeq_parallel(void)
418 return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
/*
422 * Generate a prescribed number of events and spread them across available
423 * queues. On dequeue, using single event port(port 0) verify the enqueued
 * events' attributes (no test-specific callback needed).
 */
427 test_multi_queue_enq_single_port_deq(void)
431 ret = generate_random_events(MAX_EVENTS);
435 return consume_events(0 /* port */, MAX_EVENTS, NULL);
/* Worker lcore body: keep dequeuing from this worker's port until the
 * shared atomic event counter drains to zero; validate and free each
 * event.  NOTE(review): the empty-poll continue path is elided. */
439 worker_multi_port_fn(void *arg)
441 struct test_core_param *param = arg;
443 uint16_t valid_event;
444 uint8_t port = param->port;
445 rte_atomic32_t *total_events = param->total_events;
448 while (rte_atomic32_read(total_events) > 0) {
449 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
453 ret = validate_event(&ev);
454 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
455 rte_pktmbuf_free(ev.mbuf);
/* One event processed -- decrement the shared remaining count. */
456 rte_atomic32_sub(total_events, 1);
/* Busy-wait for the given lcore to finish, printing the remaining
 * event count once per second and declaring a deadlock (with a device
 * state dump) if no completion is seen for ~10 seconds; finally join
 * all launched lcores. */
462 wait_workers_to_join(int lcore, const rte_atomic32_t *count)
464 uint64_t cycles, print_cycles;
468 print_cycles = cycles = rte_get_timer_cycles();
469 while (rte_eal_get_lcore_state(lcore) != FINISHED) {
470 uint64_t new_cycles = rte_get_timer_cycles();
/* Periodic (1 Hz) progress report. */
472 if (new_cycles - print_cycles > rte_get_timer_hz()) {
473 dpaa2_evdev_dbg("\r%s: events %d", __func__,
474 rte_atomic32_read(count));
475 print_cycles = new_cycles;
/* ~10s without completion: assume deadlock and dump device state. */
477 if (new_cycles - cycles > rte_get_timer_hz() * 10) {
479 "%s: No schedules for seconds, deadlock (%d)",
481 rte_atomic32_read(count));
482 rte_event_dev_dump(evdev, stdout);
487 rte_eal_mp_wait_lcore();
/* Launch `nb_workers` worker lcores (the first running `main_worker`,
 * the rest running `workers`), each with its own port and a shared
 * atomic counter of outstanding events; then wait for them to drain.
 * Fix: the original listing contained HTML-entity mojibake -- the two
 * rte_eal_remote_launch() calls read "¶m[...]" where the code must
 * pass "&param[...]"; restored here.  NOTE(review): several lines
 * (malloc NULL check, rte_get_next_lcore args, free(param)) are elided
 * from this listing. */
493 launch_workers_and_wait(int (*main_worker)(void *),
494 int (*workers)(void *), uint32_t total_events,
495 uint8_t nb_workers, uint8_t sched_type)
500 struct test_core_param *param;
501 rte_atomic32_t atomic_total_events;
502 uint64_t dequeue_tmo_ticks;
507 rte_atomic32_set(&atomic_total_events, total_events);
508 RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
510 param = malloc(sizeof(struct test_core_param) * nb_workers);
/* Randomized dequeue timeout (up to ~10ms) shared by all workers. */
514 ret = rte_event_dequeue_timeout_ticks(evdev,
515 rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
521 param[0].total_events = &atomic_total_events;
522 param[0].sched_type = sched_type;
524 param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
527 w_lcore = rte_get_next_lcore(
531 rte_eal_remote_launch(main_worker, &param[0], w_lcore);
533 for (port = 1; port < nb_workers; port++) {
534 param[port].total_events = &atomic_total_events;
535 param[port].sched_type = sched_type;
536 param[port].port = port;
537 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
539 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
540 rte_eal_remote_launch(workers, &param[port], w_lcore);
543 ret = wait_workers_to_join(w_lcore, &atomic_total_events);
/*
549 * Generate a prescribed number of events and spread them across available
550 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes on each worker.
 */
554 test_multi_queue_enq_multi_port_deq(void)
556 const unsigned int total_events = MAX_EVENTS;
560 ret = generate_random_events(total_events);
564 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
565 RTE_EVENT_DEV_ATTR_PORT_COUNT,
566 &nr_ports), "Port count get failed");
/* One worker per port, capped by available worker lcores. */
567 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
570 dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
571 __func__, nr_ports, rte_lcore_count() - 1);
575 return launch_workers_and_wait(worker_multi_port_fn,
576 worker_multi_port_fn, total_events,
577 nr_ports, 0xff /* invalid */);
/* Stop-flush callback registered in test_dev_stop_flush(): counts CPU
 * events flushed from the device on rte_event_dev_stop().
 * NOTE(review): the counter increment line is elided from this
 * listing. */
581 void flush(uint8_t dev_id, struct rte_event event, void *arg)
583 unsigned int *count = arg;
585 RTE_SET_USED(dev_id);
586 if (event.event_type == RTE_EVENT_TYPE_CPU)
/* Verify the stop-flush callback: inject MAX_EVENTS, register flush(),
 * stop the device, unregister, and assert every injected event was
 * reported to the callback. */
592 test_dev_stop_flush(void)
594 unsigned int total_events = MAX_EVENTS, count = 0;
597 ret = generate_random_events(total_events);
601 ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
/* Stop triggers the flush of all in-flight events via flush(). */
604 rte_event_dev_stop(evdev);
605 ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
608 RTE_TEST_ASSERT_EQUAL(total_events, count,
609 "count mismatch total_events=%d count=%d",
610 total_events, count);
/* With queue x linked to port x, every event dequeued on port p must
 * carry queue_id == p. */
615 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
616 struct rte_event *ev)
619 RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
620 "queue mismatch enq=%d deq =%d",
/*
626 * Link queue x to port x and check correctness of link by checking
627 * queue_id == x on dequeue on the specific port x
 */
630 test_queue_to_port_single_link(void)
632 int i, nr_links, ret;
636 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
637 RTE_EVENT_DEV_ATTR_PORT_COUNT,
638 &port_count), "Port count get failed");
640 /* Unlink all connections that created in eventdev_setup */
641 for (i = 0; i < (int)port_count; i++) {
642 ret = rte_event_port_unlink(evdev, i, NULL, 0);
643 RTE_TEST_ASSERT(ret >= 0,
644 "Failed to unlink all queues port=%d", i);
647 uint32_t queue_count;
649 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
650 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
651 &queue_count), "Queue count get failed");
/* One 1:1 link per queue/port pair; split the budget across links. */
653 nr_links = RTE_MIN(port_count, queue_count);
654 const unsigned int total_events = MAX_EVENTS / nr_links;
656 /* Link queue x to port x and inject events to queue x through port x */
657 for (i = 0; i < nr_links; i++) {
658 uint8_t queue = (uint8_t)i;
660 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
661 RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
/* NOTE(review): inject_events() call header elided from listing. */
665 RTE_EVENT_TYPE_CPU /* event_type */,
666 rte_rand() % 256 /* sub_event_type */,
667 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
670 total_events /* events */);
675 /* Verify the events generated from correct queue */
676 for (i = 0; i < nr_links; i++) {
677 ret = consume_events(i /* port */, total_events,
678 validate_queue_to_port_single_link);
/* With even queues linked to port 0 and odd queues to port 1, the
 * dequeue port must equal the queue id's low bit. */
687 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
688 struct rte_event *ev)
691 RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
692 "queue mismatch enq=%d deq =%d",
/*
698 * Link all even number of queues to port 0 and all odd number of queues to
699 * port 1 and verify the link connection on dequeue
 */
702 test_queue_to_port_multi_link(void)
704 int ret, port0_events = 0, port1_events = 0;
706 uint32_t nr_queues = 0;
707 uint32_t nr_ports = 0;
709 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
710 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
711 &nr_queues), "Queue count get failed");
/* NOTE(review): the next attr_get repeats the QUEUE_COUNT query into
 * the same variable -- looks like a redundant duplicate; confirm
 * against the full file. */
713 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
714 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
715 &nr_queues), "Queue count get failed");
716 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
717 RTE_EVENT_DEV_ATTR_PORT_COUNT,
718 &nr_ports), "Port count get failed");
721 dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
726 /* Unlink all connections that created in eventdev_setup */
727 for (port = 0; port < nr_ports; port++) {
728 ret = rte_event_port_unlink(evdev, port, NULL, 0);
729 RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
733 const unsigned int total_events = MAX_EVENTS / nr_queues;
735 /* Link all even number of queues to port0 and odd numbers to port 1*/
736 for (queue = 0; queue < nr_queues; queue++) {
738 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
739 RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
/* NOTE(review): inject_events() call header elided from listing. */
744 RTE_EVENT_TYPE_CPU /* event_type */,
745 rte_rand() % 256 /* sub_event_type */,
746 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
749 total_events /* events */);
/* Tally expected event counts per destination port. */
754 port0_events += total_events;
756 port1_events += total_events;
759 ret = consume_events(0 /* port */, port0_events,
760 validate_queue_to_port_multi_link);
763 ret = consume_events(1 /* port */, port1_events,
764 validate_queue_to_port_multi_link);
/* Test harness: run setup, the test, then teardown, logging and
 * accounting the outcome under `name`.
 * NOTE(review): the "Error setting up" and "Passed" log strings lack a
 * trailing '\n' while "Failed" has one -- likely a formatting bug;
 * confirm whether the logger appends newlines before changing. */
771 static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
772 int (*test)(void), const char *name)
775 RTE_LOG(INFO, PMD, "Error setting up test %s", name);
780 RTE_LOG(INFO, PMD, "%s Failed\n", name);
783 RTE_LOG(INFO, PMD, "%s Passed", name);
/* Suite entry point: run every DPAA2 eventdev self-test with per-test
 * setup/teardown, report totals, then tear the suite down. */
792 test_eventdev_dpaa2(void)
796 DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
797 test_simple_enqdeq_atomic);
798 DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
799 test_simple_enqdeq_parallel);
800 DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
801 test_multi_queue_enq_single_port_deq);
802 DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
803 test_dev_stop_flush);
804 DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
805 test_multi_queue_enq_multi_port_deq);
806 DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
807 test_queue_to_port_single_link);
808 DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
809 test_queue_to_port_multi_link);
/* Summary counters accumulated by dpaa2_test_run(). */
811 DPAA2_EVENTDEV_INFO("Total tests : %d", total);
812 DPAA2_EVENTDEV_INFO("Passed : %d", passed);
813 DPAA2_EVENTDEV_INFO("Failed : %d", failed);
814 DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);
816 testsuite_teardown();