/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>
#include <rte_fslmc.h>

#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"

#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS 8

#define DPAA2_TEST_RUN(setup, teardown, test) \
	dpaa2_test_run(setup, teardown, test, #test)
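
/*
 * Usage sketch (mirrors the calls in test_eventdev_dpaa2() at the bottom of
 * this file): the macro stringifies the test function name for the
 * pass/fail report, e.g.
 *
 *	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
 *		       test_simple_enqdeq_atomic);
 */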

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
	uint8_t seq;
};

struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_dpaa2";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			dpaa2_evdev_err("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			dpaa2_evdev_err("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

static void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

static int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_dpaa2_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
			MAX_EVENTS,
			0 /*MBUF_CACHE_SIZE*/,
			0 /* private data size */,
			512, /* Use very small mbufs */
			rte_socket_id());
	if (!eventdev_test_mempool) {
		dpaa2_evdev_err("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			dpaa2_evdev_err(
				"test expects a unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues (0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
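		/*
		 * Worked example (illustrative, not from the original
		 * source): with queue_count = 8 and
		 * RTE_EVENT_DEV_PRIORITY_LOWEST = 255, the step below is
		 * (255 + 1) / 8 = 32, so queues 0..7 get priorities
		 * 0, 32, 64, ..., 224 (0 is RTE_EVENT_DEV_PRIORITY_HIGHEST).
		 */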
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}

	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}

static int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

static void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port, uint8_t seq)
{
	struct event_attr *attr;

	/* Store the event attributes in the mbuf data area for later
	 * validation on dequeue.
	 */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;
	attr->seq = seq;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}

static int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*dpaa2_seqn(m) = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port, i);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}
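
/*
 * Typical use (mirrors test_simple_enqdeq() below; shown only as an
 * illustration): enqueue MAX_EVENTS events on flow 0, queue 0, port 0 and
 * then drain them on the same port:
 *
 *	ret = inject_events(0, RTE_EVENT_TYPE_CPU, 0, sched_type,
 *			    0, 0, MAX_EVENTS);
 *	ret = consume_events(0, MAX_EVENTS, NULL);
 */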

static int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d",
				*dpaa2_seqn(ev.mbuf));
	}
	return 0;
}

static int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}

static int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);
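
/*
 * A callback returns 0 when the dequeued event is acceptable.
 * validate_simple_enqdeq() below is a real implementation; a purely
 * illustrative one (match_port is hypothetical) could be:
 *
 *	static int match_port(uint32_t index, uint8_t port,
 *			      struct rte_event *ev)
 *	{
 *		return ev->queue_id == port ? 0 : -1;
 *	}
 */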

static int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			dpaa2_evdev_err("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		/* Run the test-specific validation, if any */
		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);

	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, attr->seq,
			"index=%d != seqn=%d", index, attr->seq);
	return 0;
}

static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,
			sched_type,
			0 /* queue */,
			0 /* port */,
			MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}

	return 0;
}

static int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	/* count is only read by debug logging below */
	RTE_SET_USED(count);

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != WAIT) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			dpaa2_evdev_dbg("\r%s: events %d", __func__,
					rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			dpaa2_evdev_err(
				"%s: No schedules for 10 seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();

	return 0;
}

static int
launch_workers_and_wait(int (*main_worker)(void *),
			int (*workers)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
			rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip main */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(main_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);

	return ret;
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
				__func__, nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}

static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		(*count)++;
}
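
/*
 * Note: the eventdev stop-flush callback is invoked once for each event
 * still held inside the device when rte_event_dev_stop() is called, so
 * after the flush the counter should equal the number of injected events
 * (see test_dev_stop_flush() below).
 */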

static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	if (ret)
		return -1;

	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_EQUAL(total_events, count,
			"count mismatch total_events=%d count=%d",
			total_events, count);

	return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by
 * verifying queue_id == x on dequeue on the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify that the events were dequeued from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1 and verify the link connections on dequeue.
 */
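/*
 * For example (illustrative): with 5 queues, queues 0, 2 and 4 are linked
 * to port 0 and queues 1 and 3 to port 1, since port = queue & 0x1.
 */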
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&nr_queues), "Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
				port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link even-numbered queues to port 0 and odd-numbered to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
				queue, port);

		ret = inject_events(
			0x100 /* flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}

static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		RTE_LOG(INFO, PMD, "Error setting up test %s\n", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			RTE_LOG(INFO, PMD, "%s Failed\n", name);
		} else {
			passed++;
			RTE_LOG(INFO, PMD, "%s Passed\n", name);
		}
	}

	total++;
	tdown();
}

int
test_eventdev_dpaa2(void)
{
	if (testsuite_setup() < 0)
		return -1;

	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_dev_stop_flush);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);

	DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
	DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
	DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
	DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);

	testsuite_teardown();

	return 0;
}