/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_per_lcore.h>
#include <rte_random.h>

#include "cnxk_eventdev.h"

#define NUM_PACKETS (1024)
#define MAX_EVENTS (1024)
#define MAX_STAGES (255)

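/* Note: MAX_STAGES must fit the 8-bit sub_event_type field of struct
 * rte_event; the flow-based pipeline tests below use that field to
 * carry the pipeline stage number.
 */
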
#define CNXK_TEST_RUN(setup, teardown, test) \
	cnxk_test_run(setup, teardown, test, #test)

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

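/* seqn_list records per-event sequence numbers in the order they are
 * dequeued; the ingress-order tests replay it via seqn_list_check() to
 * verify that event order was preserved.
 */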
static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return -1;

	seqn_list[seqn_list_index++] = val;
	/* Publish the list update before other workers observe it. */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	return 0;
}

static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			plt_err("Seqn mismatch %d %d", seqn_list[i], i);
			return -1;
		}
	}
	return 0;
}

struct test_core_param {
	uint32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

static int
testsuite_setup(const char *eventdev_name)
{
	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		plt_err("%d: Eventdev %s not found", __LINE__, eventdev_name);
		return -1;
	}

	return 0;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
				struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
		info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
		info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit = info->max_num_events;
}

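/* Setup flavors: default device config, unique per-queue priorities,
 * or a config with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT so that each
 * dequeue call may supply its own timeout.
 */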
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

static inline int
_eventdev_setup(int mode)
{
	const char *pool_name = "evdev_cnxk_test_pool";
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	int i, ret;

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(
		pool_name, MAX_EVENTS, 0, 0, 512, rte_socket_id());
	if (!eventdev_test_mempool) {
		plt_err("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step =
			(RTE_EVENT_DEV_PRIORITY_LOWEST + 1) / queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
							       &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
						i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
						i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
						i);
		}
	}

	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &port_count),
		"Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}

static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

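/* Stash the enqueue-time attributes in the mbuf data area so that
 * validate_event() can compare them with what is seen at dequeue.
 */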
static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
				 uint32_t flow_id, uint8_t event_type,
				 uint8_t sub_event_type, uint8_t sched_type,
				 uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}

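/* Allocate, tag and enqueue 'events' mbufs as NEW events through the
 * given port; each mbuf carries its injection sequence number.
 */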
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
	      uint8_t sched_type, uint8_t queue, uint8_t port,
	      unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*rte_event_pmd_selftest_seqn(m) = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
						 sub_event_type, sched_type,
						 queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

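/* Once a test has drained its events, further dequeues must return
 * nothing; any event seen here indicates a scheduling bug.
 */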
static inline int
check_excess_events(uint8_t port)
{
	uint16_t valid_event;
	struct rte_event ev;
	int i;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
					"Unexpected valid event=%d",
					*rte_event_pmd_selftest_seqn(ev.mbuf));
	}
	return 0;
}

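/* Fill the device with 'total_events' events carrying random flow ids,
 * sub event types, sched types and destination queues.
 */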
static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	uint32_t queue_count;
	unsigned int i;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */, 0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}

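/* Compare the attributes stored at enqueue time against the event seen
 * at dequeue; a mismatch means the device corrupted or mis-scheduled
 * the event.
 */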
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			      "flow_id mismatch enq=%d deq=%d", attr->flow_id,
			      ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			      "event_type mismatch enq=%d deq=%d",
			      attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			      "sub_event_type mismatch enq=%d deq=%d",
			      attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			      "sched_type mismatch enq=%d deq=%d",
			      attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			      "queue mismatch enq=%d deq=%d", attr->queue,
			      ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

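/* Dequeue until 'total_events' events have been seen, validating each
 * one; a long run of empty dequeues is treated as a deadlock. The
 * optional callback performs test-specific checks per event.
 */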
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	uint16_t valid_event;
	struct rte_event ev;
	int ret;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			plt_err("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(
				ret, "Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
			      "index=%d != seqn=%d", index,
			      *rte_event_pmd_selftest_seqn(ev->mbuf));
	return 0;
}

static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
			    0 /* sub_event_type */, sched_type, 0 /* queue */,
			    0 /* port */, MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

/*
 * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
 * operation.
 *
 * For example, inject 32 events over 0..7 queues:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25..,7,15,23,31
 * order from queue0(highest priority) to queue7(lowest_priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t queue_count;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");

	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(
		*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
		*rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,
		range, queue_count, MAX_EVENTS);
	return 0;
}

static int
test_multi_queue_priority(void)
{
	int i, max_evts_roundoff;
	/* See validate_queue_priority() comments for the validation logic */
	uint32_t queue_count;
	struct rte_mbuf *m;
	uint8_t queue;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");

	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*rte_event_pmd_selftest_seqn(m) = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
						 0, RTE_SCHED_TYPE_PARALLEL,
						 queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}

static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	uint32_t *total_events = param->total_events;
	uint8_t port = param->port;
	uint16_t valid_event;
	struct rte_event ev;
	int ret;

	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
	}

	return 0;
}

static inline int
wait_workers_to_join(const uint32_t *count)
{
	uint64_t cycles, print_cycles;

	cycles = rte_get_timer_cycles();
	print_cycles = cycles;
	while (__atomic_load_n(count, __ATOMIC_RELAXED)) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			plt_info("Events %d",
				 __atomic_load_n(count, __ATOMIC_RELAXED));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			plt_err("No schedules for 10 seconds, deadlock (%d)",
				__atomic_load_n(count, __ATOMIC_RELAXED));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();

	return 0;
}

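/* Run 'main_thread' on the first worker lcore and 'worker_thread' on
 * the remaining ones, then block until the shared event count drains
 * to zero.
 */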
static inline int
launch_workers_and_wait(int (*main_thread)(void *),
			int (*worker_thread)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint32_t atomic_total_events;
	struct test_core_param *param;
	uint64_t dequeue_tmo_ticks;
	uint8_t port = 0;
	int w_lcore;
	int ret;

	if (!nb_workers)
		return 0;

	__atomic_store_n(&atomic_total_events, total_events, __ATOMIC_RELAXED);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(
		evdev, rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	w_lcore = rte_get_next_lcore(
		/* start core */ -1,
		/* skip main */ 1,
		/* wrap */ 0);
	rte_eal_remote_launch(main_thread, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	ret = wait_workers_to_join(&atomic_total_events);
	free(param);

	return ret;
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &nr_ports),
		"Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		plt_err("Not enough ports=%d or workers=%d", nr_ports,
			rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
				       worker_multi_port_fn, total_events,
				       nr_ports, 0xff /* invalid */);
}

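/* Device stop-flush callback: counts the events still held inside the
 * device when rte_event_dev_stop() is called.
 */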
static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		*count = *count + 1;
}

static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	if (ret)
		return -2;
	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	if (ret)
		return -3;
	RTE_TEST_ASSERT_EQUAL(total_events, count,
			      "count mismatch total_events=%d count=%d",
			      total_events, count);

	return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
				   struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
			      "queue mismatch enq=%d deq=%d", port,
			      ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check correctness of link by checking
 * queue_id == x on dequeue on the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;
	uint32_t queue_count;
	uint32_t port_count;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &port_count),
		"Port count get failed");

	/* Unlink all connections created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
				i);
	}

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(0x100 /*flow_id */,
				    RTE_EVENT_TYPE_CPU /* event_type */,
				    rte_rand() % 256 /* sub_event_type */,
				    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
				    queue /* queue */, i /* port */,
				    total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify that the events were scheduled from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				     validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
				  struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
			      "queue mismatch enq=%d deq=%d", port,
			      ev->queue_id);
	return 0;
}

/*
 * Link all even number of queues to port 0 and all odd number of queues to
 * port 1 and verify the link connection on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;
	uint8_t queue, port;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &nr_queues),
		"Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &nr_ports),
		"Port count get failed");

	if (nr_ports < 2) {
		plt_err("Not enough ports to test ports=%d", nr_ports);
		return 0;
	}

	/* Unlink all connections created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
				port);
	}

	unsigned int total_events = MAX_EVENTS / nr_queues;
	if (!total_events) {
		nr_queues = MAX_EVENTS;
		total_events = MAX_EVENTS / nr_queues;
	}

	/* Link all even number of queues to port0 and odd numbers to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
				queue, port);

		ret = inject_events(0x100 /*flow_id */,
				    RTE_EVENT_TYPE_CPU /* event_type */,
				    rte_rand() % 256 /* sub_event_type */,
				    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
				    queue /* queue */, port /* port */,
				    total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
			     validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
			     validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}

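/* Two-stage flow-based pipeline worker: the stage is carried in
 * ev.sub_event_type. Stage 0 events are forwarded to stage 1 with the
 * requested sched type; stage 1 events record their sequence number
 * and are released.
 */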
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
	uint32_t *total_events = param->total_events;
	uint8_t new_sched_type = param->sched_type;
	uint8_t port = param->port;
	uint16_t valid_event;
	struct rte_event ev;

	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
						      dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);

			if (seqn_list_update(seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				__atomic_sub_fetch(total_events, 1,
						   __ATOMIC_RELAXED);
			} else {
				plt_err("Failed to update seqn_list");
				return -1;
			}
		} else {
			plt_err("Invalid ev.sub_event_type = %d",
				ev.sub_event_type);
			return -1;
		}
	}

	return 0;
}

static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
				    uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &nr_ports),
		"Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		plt_err("Not enough ports=%d or workers=%d", nr_ports,
			rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with sequence numbers 0..total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */, in_sched_type, 0 /* queue */,
		0 /* port */, total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
				      worker_flow_based_pipeline, total_events,
				      nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
	    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}

	return 0;
}

/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
						   RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
						   RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
						   RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
						   RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
						   RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
						   RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
						   RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
						   RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
						   RTE_SCHED_TYPE_PARALLEL);
}

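/* Two-stage queue(group)-based pipeline worker: the stage is carried
 * in ev.queue_id. Queue 0 events are forwarded to queue 1 with the
 * requested sched type; queue 1 events record their sequence number
 * and are released.
 */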
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
	uint32_t *total_events = param->total_events;
	uint8_t new_sched_type = param->sched_type;
	uint8_t port = param->port;
	uint16_t valid_event;
	struct rte_event ev;

	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
						      dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0(group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1) */
			uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);

			if (seqn_list_update(seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				__atomic_sub_fetch(total_events, 1,
						   __ATOMIC_RELAXED);
			} else {
				plt_err("Failed to update seqn_list");
				return -1;
			}
		} else {
			plt_err("Invalid ev.queue_id = %d", ev.queue_id);
			return -1;
		}
	}

	return 0;
}

static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
				     uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t queue_count;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &nr_ports),
		"Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		plt_err("Not enough queues=%d ports=%d or workers=%d",
			queue_count, nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with sequence numbers 0..total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */, in_sched_type, 0 /* queue */,
		0 /* port */, total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
				      worker_group_based_pipeline, total_events,
				      nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
	    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}

	return 0;
}

static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
						    RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
						    RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
						    RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
						    RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
						    RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
						    RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
						    RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
						    RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
						    RTE_SCHED_TYPE_PARALLEL);
}

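/* Max-stage flow pipeline worker: each event climbs one stage per hop
 * by incrementing sub_event_type with a random sched type until it
 * reaches MAX_STAGES, where it is freed and counted.
 */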
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	uint32_t *total_events = param->total_events;
	uint8_t port = param->port;
	uint16_t valid_event;
	struct rte_event ev;

	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == MAX_STAGES) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}

	return 0;
}

static int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &nr_ports),
		"Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		plt_err("Not enough ports=%d or workers=%d", nr_ports,
			rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with sequence numbers 0..MAX_EVENTS - 1 */
	ret = inject_events(
		0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */, 0 /* port */, MAX_EVENTS /* events */);
	if (ret)
		return -1;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
				       0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	uint32_t queue_count;
	uint16_t valid_event;
	struct rte_event ev;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");
	uint8_t nr_queues = queue_count;
	uint32_t *total_events = param->total_events;

	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}

	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	uint32_t queue_count;
	uint16_t valid_event;
	struct rte_event ev;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				       &queue_count),
		"Queue count get failed");
	uint8_t nr_queues = queue_count;
	uint32_t *total_events = param->total_events;

	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}

	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}

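/* Producer worker: enqueues NUM_PACKETS tagged events on one ordered
 * flow through a single port so that a consumer can verify ingress
 * order.
 */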
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		*rte_event_pmd_selftest_seqn(m) = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	RTE_TEST_ASSERT_SUCCESS(
		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				       &nr_ports),
		"Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		plt_err("### Not enough cores for test.");
		return 0;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn, NUM_PACKETS,
				nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the event order was maintained */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
		worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
		worker_group_based_pipeline);
}

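/* Run one test case between its setup and teardown hooks and update
 * the pass/fail/unsupported counters.
 */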
static void
cnxk_test_run(int (*setup)(void), void (*tdown)(void), int (*test)(void),
	      const char *name)
{
	if (setup() < 0) {
		printf("Error setting up test %s\n", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			printf("+ TestCase [%2d] : %s failed\n", total, name);
		} else {
			passed++;
			printf("+ TestCase [%2d] : %s succeeded\n", total,
			       name);
		}
	}

	total++;
	tdown();
}

static int
cnxk_sso_testsuite_run(const char *dev_name)
{
	int rc;

	testsuite_setup(dev_name);

	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_simple_enqdeq_ordered);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_simple_enqdeq_atomic);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_simple_enqdeq_parallel);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_queue_enq_single_port_deq);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown, test_dev_stop_flush);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_queue_enq_multi_port_deq);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_queue_to_port_single_link);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_queue_to_port_multi_link);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_ordered_to_atomic);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_ordered_to_ordered);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_ordered_to_parallel);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_atomic_to_atomic);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_atomic_to_ordered);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_atomic_to_parallel);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_parallel_to_atomic);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_parallel_to_ordered);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_parallel_to_parallel);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_ordered_to_atomic);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_ordered_to_ordered);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_ordered_to_parallel);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_atomic_to_atomic);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_atomic_to_ordered);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_atomic_to_parallel);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_parallel_to_atomic);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_parallel_to_ordered);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_parallel_to_parallel);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_flow_max_stages_random_sched_type);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_queue_max_stages_random_sched_type);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_multi_port_mixed_max_stages_random_sched_type);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_flow_producer_consumer_ingress_order_test);
	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
		      test_queue_producer_consumer_ingress_order_test);
	CNXK_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
		      test_multi_queue_priority);
	CNXK_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
		      test_multi_port_flow_ordered_to_atomic);
	CNXK_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
		      test_multi_port_queue_ordered_to_atomic);

	printf("Total tests   : %d\n", total);
	printf("Passed        : %d\n", passed);
	printf("Failed        : %d\n", failed);
	printf("Not supported : %d\n", unsupported);

	rc = failed;
	testsuite_teardown();

	return rc;
}

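/* Entry point: look up the SSO device handle saved in its memzone and
 * run the suite once per supported work-slot/getwork mode.
 */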
int
cnxk_sso_selftest(const char *dev_name)
{
	const struct rte_memzone *mz;
	struct cnxk_sso_evdev *dev;
	int rc = -1;

	mz = rte_memzone_lookup(CNXK_SSO_MZ_NAME);
	if (mz == NULL)
		return rc;

	dev = (void *)*((uint64_t *)mz->addr);
	if (roc_model_runtime_is_cn9k()) {
		/* Verify single ws mode. */
		printf("Verifying CN9K Single workslot mode\n");
		dev->dual_ws = 0;
		cn9k_sso_set_rsrc(dev);
		if (cnxk_sso_testsuite_run(dev_name))
			return rc;
		/* Verify dual ws mode. */
		printf("Verifying CN9K Dual workslot mode\n");
		dev->dual_ws = 1;
		cn9k_sso_set_rsrc(dev);
		if (cnxk_sso_testsuite_run(dev_name))
			return rc;
	}

	if (roc_model_runtime_is_cn10k()) {
		printf("Verifying CN10K workslot getwork mode none\n");
		dev->gw_mode = CN10K_GW_MODE_NONE;
		if (cnxk_sso_testsuite_run(dev_name))
			return rc;
		printf("Verifying CN10K workslot getwork mode prefetch\n");
		dev->gw_mode = CN10K_GW_MODE_PREF;
		if (cnxk_sso_testsuite_run(dev_name))
			return rc;
		printf("Verifying CN10K workslot getwork mode smart prefetch\n");
		dev->gw_mode = CN10K_GW_MODE_PREF_WFE;
		if (cnxk_sso_testsuite_run(dev_name))
			return rc;
	}

	return 0;
}