1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_atomic.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_hexdump.h>
13 #include <rte_launch.h>
14 #include <rte_lcore.h>
16 #include <rte_malloc.h>
17 #include <rte_memcpy.h>
18 #include <rte_per_lcore.h>
19 #include <rte_random.h>
22 #include "otx2_evdev.h"
24 #define NUM_PACKETS (1024)
25 #define MAX_EVENTS (1024)
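/* Run a test case with its setup/teardown pair and report it by its stringified name */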
27 #define OCTEONTX2_TEST_RUN(setup, teardown, test) \
28 octeontx_test_run(setup, teardown, test, #test)
33 static int unsupported;
36 static struct rte_mempool *eventdev_test_mempool;
41 uint8_t sub_event_type;
47 static uint32_t seqn_list_index;
48 static int seqn_list[NUM_PACKETS];
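/* Sequence log used to verify that ingress ordering is preserved across stages */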
53 RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
54 memset(seqn_list, 0, sizeof(seqn_list));
59 seqn_list_update(int val)
61 if (seqn_list_index >= NUM_PACKETS)
64 seqn_list[seqn_list_index++] = val;
70 seqn_list_check(int limit)
74 for (i = 0; i < limit; i++) {
75 if (seqn_list[i] != i) {
76 otx2_err("Seqn mismatch %d %d", seqn_list[i], i);
83 struct test_core_param {
84 rte_atomic32_t *total_events;
85 uint64_t dequeue_tmo_ticks;
93 const char *eventdev_name = "event_octeontx2";
95 evdev = rte_event_dev_get_dev_id(eventdev_name);
97 otx2_err("%d: Eventdev %s not found", __LINE__, eventdev_name);
104 testsuite_teardown(void)
106 rte_event_dev_close(evdev);
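/* Build a device configuration from the maximum capabilities reported by the PMD */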
110 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
111 struct rte_event_dev_info *info)
113 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
114 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
115 dev_conf->nb_event_ports = info->max_event_ports;
116 dev_conf->nb_event_queues = info->max_event_queues;
117 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
118 dev_conf->nb_event_port_dequeue_depth =
119 info->max_event_port_dequeue_depth;
120 dev_conf->nb_event_port_enqueue_depth =
121 info->max_event_port_enqueue_depth;
124 dev_conf->nb_events_limit =
125 info->max_num_events;
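/* Setup flavours: default config, per-queue priorities or per-dequeue timeout */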
129 TEST_EVENTDEV_SETUP_DEFAULT,
130 TEST_EVENTDEV_SETUP_PRIORITY,
131 TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
135 _eventdev_setup(int mode)
137 const char *pool_name = "evdev_octeontx_test_pool";
138 struct rte_event_dev_config dev_conf;
139 struct rte_event_dev_info info;
142 /* Create and destroy the pool for each test case to make it standalone */
143 eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
146 if (!eventdev_test_mempool) {
147 otx2_err("ERROR creating mempool");
151 ret = rte_event_dev_info_get(evdev, &info);
152 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
154 devconf_set_default_sane_values(&dev_conf, &info);
155 if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
156 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
158 ret = rte_event_dev_configure(evdev, &dev_conf);
159 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
161 uint32_t queue_count;
162 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
163 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
164 "Queue count get failed");
166 if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
170 /* Configure event queues (0 to n) with
171 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
172 * RTE_EVENT_DEV_PRIORITY_LOWEST
174 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
176 for (i = 0; i < (int)queue_count; i++) {
177 struct rte_event_queue_conf queue_conf;
179 ret = rte_event_queue_default_conf_get(evdev, i,
181 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
183 queue_conf.priority = i * step;
184 ret = rte_event_queue_setup(evdev, i, &queue_conf);
185 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
190 /* Configure event queues with default priority */
191 for (i = 0; i < (int)queue_count; i++) {
192 ret = rte_event_queue_setup(evdev, i, NULL);
193 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
197 /* Configure event ports */
199 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
200 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
201 "Port count get failed");
202 for (i = 0; i < (int)port_count; i++) {
203 ret = rte_event_port_setup(evdev, i, NULL);
204 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
205 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
206 RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
210 ret = rte_event_dev_start(evdev);
211 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
219 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
223 eventdev_setup_priority(void)
225 return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
229 eventdev_setup_dequeue_timeout(void)
231 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
235 eventdev_teardown(void)
237 rte_event_dev_stop(evdev);
238 rte_mempool_free(eventdev_test_mempool);
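/* Fill the mbuf-resident validation attributes and the event to be enqueued */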
242 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
243 uint32_t flow_id, uint8_t event_type,
244 uint8_t sub_event_type, uint8_t sched_type,
245 uint8_t queue, uint8_t port)
247 struct event_attr *attr;
249 /* Store the event attributes in mbuf for future reference */
250 attr = rte_pktmbuf_mtod(m, struct event_attr *);
251 attr->flow_id = flow_id;
252 attr->event_type = event_type;
253 attr->sub_event_type = sub_event_type;
254 attr->sched_type = sched_type;
258 ev->flow_id = flow_id;
259 ev->sub_event_type = sub_event_type;
260 ev->event_type = event_type;
261 /* Inject the new event */
262 ev->op = RTE_EVENT_OP_NEW;
263 ev->sched_type = sched_type;
264 ev->queue_id = queue;
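/* Allocate mbufs and enqueue 'events' new events with the given attributes through 'port' */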
269 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
270 uint8_t sched_type, uint8_t queue, uint8_t port,
276 for (i = 0; i < events; i++) {
277 struct rte_event ev = {.event = 0, .u64 = 0};
279 m = rte_pktmbuf_alloc(eventdev_test_mempool);
280 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
283 update_event_and_validation_attr(m, &ev, flow_id, event_type,
284 sub_event_type, sched_type,
286 rte_event_enqueue_burst(evdev, port, &ev, 1);
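/* Ensure no stray events remain on the port once the expected count has been consumed */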
292 check_excess_events(uint8_t port)
294 uint16_t valid_event;
298 /* Check for excess events; retry a few times and then exit */
299 for (i = 0; i < 32; i++) {
300 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
302 RTE_TEST_ASSERT_SUCCESS(valid_event,
303 "Unexpected valid event=%d",
310 generate_random_events(const unsigned int total_events)
312 struct rte_event_dev_info info;
313 uint32_t queue_count;
317 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
318 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
319 "Queue count get failed");
321 ret = rte_event_dev_info_get(evdev, &info);
322 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
323 for (i = 0; i < total_events; i++) {
325 rte_rand() % info.max_event_queue_flows /*flow_id */,
326 RTE_EVENT_TYPE_CPU /* event_type */,
327 rte_rand() % 256 /* sub_event_type */,
328 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
329 rte_rand() % queue_count /* queue */,
340 validate_event(struct rte_event *ev)
342 struct event_attr *attr;
344 attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
345 RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
346 "flow_id mismatch enq=%d deq =%d",
347 attr->flow_id, ev->flow_id);
348 RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
349 "event_type mismatch enq=%d deq =%d",
350 attr->event_type, ev->event_type);
351 RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
352 "sub_event_type mismatch enq=%d deq =%d",
353 attr->sub_event_type, ev->sub_event_type);
354 RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
355 "sched_type mismatch enq=%d deq =%d",
356 attr->sched_type, ev->sched_type);
357 RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
358 "queue mismatch enq=%d deq =%d",
359 attr->queue, ev->queue_id);
363 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
364 struct rte_event *ev);
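/* Dequeue up to total_events from the port, validate each event, run the
 * optional test-specific callback and bail out on lack of forward progress.
 */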
367 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
369 uint32_t events = 0, forward_progress_cnt = 0, index = 0;
370 uint16_t valid_event;
375 if (++forward_progress_cnt > UINT16_MAX) {
376 otx2_err("Detected deadlock");
380 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
384 forward_progress_cnt = 0;
385 ret = validate_event(&ev);
390 ret = fn(index, port, &ev);
391 RTE_TEST_ASSERT_SUCCESS(ret,
392 "Failed to validate test specific event");
397 rte_pktmbuf_free(ev.mbuf);
398 if (++events >= total_events)
402 return check_excess_events(port);
406 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
409 RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
410 index, ev->mbuf->seqn);
415 test_simple_enqdeq(uint8_t sched_type)
419 ret = inject_events(0 /*flow_id */,
420 RTE_EVENT_TYPE_CPU /* event_type */,
421 0 /* sub_event_type */,
429 return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
433 test_simple_enqdeq_ordered(void)
435 return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
439 test_simple_enqdeq_atomic(void)
441 return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
445 test_simple_enqdeq_parallel(void)
447 return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
451 * Generate a prescribed number of events and spread them across available
452 * queues. On dequeue, using a single event port (port 0), verify the enqueued
456 test_multi_queue_enq_single_port_deq(void)
460 ret = generate_random_events(MAX_EVENTS);
464 return consume_events(0 /* port */, MAX_EVENTS, NULL);
468 * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
471 * For example, Inject 32 events over 0..7 queues
472 * enqueue events 0, 8, 16, 24 in queue 0
473 * enqueue events 1, 9, 17, 25 in queue 1
476 * enqueue events 7, 15, 23, 31 in queue 7
478 * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25,..,7,15,23,31
479 * order from queue 0 (highest priority) to queue 7 (lowest priority)
482 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
484 uint32_t queue_count;
486 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
487 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
488 "Queue count get failed");
491 uint32_t range = MAX_EVENTS / queue_count;
492 uint32_t expected_val = (index % range) * queue_count;
494 expected_val += ev->queue_id;
496 RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
497 "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
498 ev->mbuf->seqn, index, expected_val, range,
499 queue_count, MAX_EVENTS);
504 test_multi_queue_priority(void)
506 int i, max_evts_roundoff;
507 /* See validate_queue_priority() comments for the priority validation logic */
508 uint32_t queue_count;
512 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
513 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
514 "Queue count get failed");
517 max_evts_roundoff = MAX_EVENTS / queue_count;
518 max_evts_roundoff *= queue_count;
520 for (i = 0; i < max_evts_roundoff; i++) {
521 struct rte_event ev = {.event = 0, .u64 = 0};
523 m = rte_pktmbuf_alloc(eventdev_test_mempool);
524 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
527 queue = i % queue_count;
528 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
529 0, RTE_SCHED_TYPE_PARALLEL,
531 rte_event_enqueue_burst(evdev, 0, &ev, 1);
534 return consume_events(0, max_evts_roundoff, validate_queue_priority);
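/* Generic worker: dequeue, validate, free the mbuf and decrement the shared counter */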
538 worker_multi_port_fn(void *arg)
540 struct test_core_param *param = arg;
541 rte_atomic32_t *total_events = param->total_events;
542 uint8_t port = param->port;
543 uint16_t valid_event;
547 while (rte_atomic32_read(total_events) > 0) {
548 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
552 ret = validate_event(&ev);
553 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
554 rte_pktmbuf_free(ev.mbuf);
555 rte_atomic32_sub(total_events, 1);
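/* Spin until all events are drained; dump the device state if progress stalls */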
562 wait_workers_to_join(const rte_atomic32_t *count)
564 uint64_t cycles, print_cycles;
566 cycles = rte_get_timer_cycles();
567 print_cycles = cycles;
568 while (rte_atomic32_read(count)) {
569 uint64_t new_cycles = rte_get_timer_cycles();
571 if (new_cycles - print_cycles > rte_get_timer_hz()) {
572 otx2_err("Events %d", rte_atomic32_read(count));
573 print_cycles = new_cycles;
575 if (new_cycles - cycles > rte_get_timer_hz() * 10000) {
576 otx2_err("No schedules for seconds, deadlock (%d)",
577 rte_atomic32_read(count));
578 rte_event_dev_dump(evdev, stdout);
583 rte_eal_mp_wait_lcore();
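/* Launch a master worker on port 0 and slave workers on the remaining ports,
 * then wait for them to drain total_events.
 */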
589 launch_workers_and_wait(int (*master_worker)(void *),
590 int (*slave_workers)(void *), uint32_t total_events,
591 uint8_t nb_workers, uint8_t sched_type)
593 rte_atomic32_t atomic_total_events;
594 struct test_core_param *param;
595 uint64_t dequeue_tmo_ticks;
603 rte_atomic32_set(&atomic_total_events, total_events);
606 param = malloc(sizeof(struct test_core_param) * nb_workers);
610 ret = rte_event_dequeue_timeout_ticks(evdev,
611 rte_rand() % 10000000 /* 10ms */,
618 param[0].total_events = &atomic_total_events;
619 param[0].sched_type = sched_type;
621 param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
624 w_lcore = rte_get_next_lcore(
628 rte_eal_remote_launch(master_worker, ¶m[0], w_lcore);
630 for (port = 1; port < nb_workers; port++) {
631 param[port].total_events = &atomic_total_events;
632 param[port].sched_type = sched_type;
633 param[port].port = port;
634 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
636 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
637 rte_eal_remote_launch(slave_workers, ¶m[port], w_lcore);
641 ret = wait_workers_to_join(&atomic_total_events);
648 * Generate a prescribed number of events and spread them across available
649 * queues. Dequeue the events through multiple ports and verify the enqueued
653 test_multi_queue_enq_multi_port_deq(void)
655 const unsigned int total_events = MAX_EVENTS;
659 ret = generate_random_events(total_events);
663 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
664 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
665 "Port count get failed");
666 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
669 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
670 rte_lcore_count() - 1);
674 return launch_workers_and_wait(worker_multi_port_fn,
675 worker_multi_port_fn, total_events,
676 nr_ports, 0xff /* invalid */);
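/* Stop-flush callback: count the CPU events drained on device stop */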
680 void flush(uint8_t dev_id, struct rte_event event, void *arg)
682 unsigned int *count = arg;
684 RTE_SET_USED(dev_id);
685 if (event.event_type == RTE_EVENT_TYPE_CPU)
690 test_dev_stop_flush(void)
692 unsigned int total_events = MAX_EVENTS, count = 0;
695 ret = generate_random_events(total_events);
699 ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
702 rte_event_dev_stop(evdev);
703 ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
706 RTE_TEST_ASSERT_EQUAL(total_events, count,
707 "count mismatch total_events=%d count=%d",
708 total_events, count);
714 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
715 struct rte_event *ev)
718 RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
719 "queue mismatch enq=%d deq =%d",
726 * Link queue x to port x and check correctness of link by checking
727 * queue_id == x on dequeue on the specific port x
730 test_queue_to_port_single_link(void)
732 int i, nr_links, ret;
733 uint32_t queue_count;
736 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
737 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
738 "Port count get failed");
740 /* Unlink all connections that were created in eventdev_setup */
741 for (i = 0; i < (int)port_count; i++) {
742 ret = rte_event_port_unlink(evdev, i, NULL, 0);
743 RTE_TEST_ASSERT(ret >= 0,
744 "Failed to unlink all queues port=%d", i);
747 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
748 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
749 "Queue count get failed");
751 nr_links = RTE_MIN(port_count, queue_count);
752 const unsigned int total_events = MAX_EVENTS / nr_links;
754 /* Link queue x to port x and inject events to queue x through port x */
755 for (i = 0; i < nr_links; i++) {
756 uint8_t queue = (uint8_t)i;
758 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
759 RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
761 ret = inject_events(0x100 /*flow_id */,
762 RTE_EVENT_TYPE_CPU /* event_type */,
763 rte_rand() % 256 /* sub_event_type */,
764 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
765 queue /* queue */, i /* port */,
766 total_events /* events */);
771 /* Verify that the events arrived from the correct queue */
772 for (i = 0; i < nr_links; i++) {
773 ret = consume_events(i /* port */, total_events,
774 validate_queue_to_port_single_link);
783 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
784 struct rte_event *ev)
787 RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
788 "queue mismatch enq=%d deq =%d",
795 * Link all even-numbered queues to port 0 and all odd-numbered queues to
796 * port 1, then verify the links on dequeue
799 test_queue_to_port_multi_link(void)
801 int ret, port0_events = 0, port1_events = 0;
802 uint32_t nr_queues = 0;
803 uint32_t nr_ports = 0;
806 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
807 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),
808 "Queue count get failed");
812 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
813 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
814 "Port count get failed");
817 otx2_err("Not enough ports to test ports=%d", nr_ports);
821 /* Unlink all connections that were created in eventdev_setup */
822 for (port = 0; port < nr_ports; port++) {
823 ret = rte_event_port_unlink(evdev, port, NULL, 0);
824 RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
828 const unsigned int total_events = MAX_EVENTS / nr_queues;
830 /* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
831 for (queue = 0; queue < nr_queues; queue++) {
833 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
834 RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
837 ret = inject_events(0x100 /*flow_id */,
838 RTE_EVENT_TYPE_CPU /* event_type */,
839 rte_rand() % 256 /* sub_event_type */,
840 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
841 queue /* queue */, port /* port */,
842 total_events /* events */);
847 port0_events += total_events;
849 port1_events += total_events;
852 ret = consume_events(0 /* port */, port0_events,
853 validate_queue_to_port_multi_link);
856 ret = consume_events(1 /* port */, port1_events,
857 validate_queue_to_port_multi_link);
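/* Two-stage pipeline keyed on sub_event_type: stage 0 events are forwarded
 * to stage 1 with the requested sched type, stage 1 events are logged in
 * seqn_list and retired.
 */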
865 worker_flow_based_pipeline(void *arg)
867 struct test_core_param *param = arg;
868 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
869 rte_atomic32_t *total_events = param->total_events;
870 uint8_t new_sched_type = param->sched_type;
871 uint8_t port = param->port;
872 uint16_t valid_event;
875 while (rte_atomic32_read(total_events) > 0) {
876 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
881 /* Events from stage 0 */
882 if (ev.sub_event_type == 0) {
883 /* Move to atomic flow to maintain the ordering */
885 ev.event_type = RTE_EVENT_TYPE_CPU;
886 ev.sub_event_type = 1; /* stage 1 */
887 ev.sched_type = new_sched_type;
888 ev.op = RTE_EVENT_OP_FORWARD;
889 rte_event_enqueue_burst(evdev, port, &ev, 1);
890 } else if (ev.sub_event_type == 1) { /* Events from stage 1 */
891 if (seqn_list_update(ev.mbuf->seqn) == 0) {
892 rte_pktmbuf_free(ev.mbuf);
893 rte_atomic32_sub(total_events, 1);
895 otx2_err("Failed to update seqn_list");
899 otx2_err("Invalid ev.sub_event_type = %d",
908 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
909 uint8_t out_sched_type)
911 const unsigned int total_events = MAX_EVENTS;
915 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
916 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
917 "Port count get failed");
918 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
921 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
922 rte_lcore_count() - 1);
926 /* Inject events with m->seqn set from 0 up to total_events - 1 */
927 ret = inject_events(0x1 /*flow_id */,
928 RTE_EVENT_TYPE_CPU /* event_type */,
929 0 /* sub_event_type (stage 0) */,
933 total_events /* events */);
938 ret = launch_workers_and_wait(worker_flow_based_pipeline,
939 worker_flow_based_pipeline, total_events,
940 nr_ports, out_sched_type);
944 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
945 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
946 /* Check whether the event order was maintained */
947 return seqn_list_check(total_events);
953 /* Multi port ordered to atomic transaction */
955 test_multi_port_flow_ordered_to_atomic(void)
957 /* Ingress event order test */
958 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
959 RTE_SCHED_TYPE_ATOMIC);
963 test_multi_port_flow_ordered_to_ordered(void)
965 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
966 RTE_SCHED_TYPE_ORDERED);
970 test_multi_port_flow_ordered_to_parallel(void)
972 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
973 RTE_SCHED_TYPE_PARALLEL);
977 test_multi_port_flow_atomic_to_atomic(void)
979 /* Ingress event order test */
980 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
981 RTE_SCHED_TYPE_ATOMIC);
985 test_multi_port_flow_atomic_to_ordered(void)
987 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
988 RTE_SCHED_TYPE_ORDERED);
992 test_multi_port_flow_atomic_to_parallel(void)
994 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
995 RTE_SCHED_TYPE_PARALLEL);
999 test_multi_port_flow_parallel_to_atomic(void)
1001 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1002 RTE_SCHED_TYPE_ATOMIC);
1006 test_multi_port_flow_parallel_to_ordered(void)
1008 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1009 RTE_SCHED_TYPE_ORDERED);
1013 test_multi_port_flow_parallel_to_parallel(void)
1015 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1016 RTE_SCHED_TYPE_PARALLEL);
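/* Two-stage pipeline keyed on queue_id: queue 0 events are forwarded to
 * queue 1 with the requested sched type, queue 1 events are logged and
 * retired.
 */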
1020 worker_group_based_pipeline(void *arg)
1022 struct test_core_param *param = arg;
1023 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
1024 rte_atomic32_t *total_events = param->total_events;
1025 uint8_t new_sched_type = param->sched_type;
1026 uint8_t port = param->port;
1027 uint16_t valid_event;
1028 struct rte_event ev;
1030 while (rte_atomic32_read(total_events) > 0) {
1031 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
1036 /* Events from stage 0 (group 0) */
1037 if (ev.queue_id == 0) {
1038 /* Move to atomic flow to maintain the ordering */
1040 ev.event_type = RTE_EVENT_TYPE_CPU;
1041 ev.sched_type = new_sched_type;
1042 ev.queue_id = 1; /* Stage 1*/
1043 ev.op = RTE_EVENT_OP_FORWARD;
1044 rte_event_enqueue_burst(evdev, port, &ev, 1);
1045 } else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
1046 if (seqn_list_update(ev.mbuf->seqn) == 0) {
1047 rte_pktmbuf_free(ev.mbuf);
1048 rte_atomic32_sub(total_events, 1);
1050 otx2_err("Failed to update seqn_list");
1054 otx2_err("Invalid ev.queue_id = %d", ev.queue_id);
1063 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
1064 uint8_t out_sched_type)
1066 const unsigned int total_events = MAX_EVENTS;
1067 uint32_t queue_count;
1071 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1072 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1073 "Port count get failed");
1075 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1077 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1078 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1079 "Queue count get failed");
1080 if (queue_count < 2 || !nr_ports) {
1081 otx2_err("Not enough queues=%d ports=%d or workers=%d",
1082 queue_count, nr_ports,
1083 rte_lcore_count() - 1);
1087 /* Inject events with m->seqn set from 0 up to total_events - 1 */
1088 ret = inject_events(0x1 /*flow_id */,
1089 RTE_EVENT_TYPE_CPU /* event_type */,
1090 0 /* sub_event_type (stage 0) */,
1094 total_events /* events */);
1098 ret = launch_workers_and_wait(worker_group_based_pipeline,
1099 worker_group_based_pipeline, total_events,
1100 nr_ports, out_sched_type);
1104 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1105 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
1106 /* Check whether the event order was maintained */
1107 return seqn_list_check(total_events);
1114 test_multi_port_queue_ordered_to_atomic(void)
1116 /* Ingress event order test */
1117 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1118 RTE_SCHED_TYPE_ATOMIC);
1122 test_multi_port_queue_ordered_to_ordered(void)
1124 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1125 RTE_SCHED_TYPE_ORDERED);
1129 test_multi_port_queue_ordered_to_parallel(void)
1131 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1132 RTE_SCHED_TYPE_PARALLEL);
1136 test_multi_port_queue_atomic_to_atomic(void)
1138 /* Ingress event order test */
1139 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1140 RTE_SCHED_TYPE_ATOMIC);
1144 test_multi_port_queue_atomic_to_ordered(void)
1146 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1147 RTE_SCHED_TYPE_ORDERED);
1151 test_multi_port_queue_atomic_to_parallel(void)
1153 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1154 RTE_SCHED_TYPE_PARALLEL);
1158 test_multi_port_queue_parallel_to_atomic(void)
1160 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1161 RTE_SCHED_TYPE_ATOMIC);
1165 test_multi_port_queue_parallel_to_ordered(void)
1167 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1168 RTE_SCHED_TYPE_ORDERED);
1172 test_multi_port_queue_parallel_to_parallel(void)
1174 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1175 RTE_SCHED_TYPE_PARALLEL);
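/* Forward each event to the next sub_event_type stage with a random sched
 * type until the last stage (255) retires it.
 */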
1179 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1181 struct test_core_param *param = arg;
1182 rte_atomic32_t *total_events = param->total_events;
1183 uint8_t port = param->port;
1184 uint16_t valid_event;
1185 struct rte_event ev;
1187 while (rte_atomic32_read(total_events) > 0) {
1188 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1192 if (ev.sub_event_type == 255) { /* last stage */
1193 rte_pktmbuf_free(ev.mbuf);
1194 rte_atomic32_sub(total_events, 1);
1196 ev.event_type = RTE_EVENT_TYPE_CPU;
1197 ev.sub_event_type++;
1199 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1200 ev.op = RTE_EVENT_OP_FORWARD;
1201 rte_event_enqueue_burst(evdev, port, &ev, 1);
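/* Inject MAX_EVENTS stage-0 events and run the given worker across the available ports */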
1209 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1214 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1215 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1216 "Port count get failed");
1217 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1220 otx2_err("Not enough ports=%d or workers=%d",
1221 nr_ports, rte_lcore_count() - 1);
1225 /* Inject events with m->seqn set from 0 up to MAX_EVENTS - 1 */
1226 ret = inject_events(0x1 /*flow_id */,
1227 RTE_EVENT_TYPE_CPU /* event_type */,
1228 0 /* sub_event_type (stage 0) */,
1230 (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1233 MAX_EVENTS /* events */);
1237 return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1238 0xff /* invalid */);
1241 /* Flow based pipeline with the maximum number of stages and random sched types */
1243 test_multi_port_flow_max_stages_random_sched_type(void)
1245 return launch_multi_port_max_stages_random_sched_type(
1246 worker_flow_based_pipeline_max_stages_rand_sched_type);
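/* Forward each event to the next queue with a random sched type until the
 * last queue retires it.
 */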
1250 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1252 struct test_core_param *param = arg;
1253 uint8_t port = param->port;
1254 uint32_t queue_count;
1255 uint16_t valid_event;
1256 struct rte_event ev;
1258 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1259 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1260 "Queue count get failed");
1261 uint8_t nr_queues = queue_count;
1262 rte_atomic32_t *total_events = param->total_events;
1264 while (rte_atomic32_read(total_events) > 0) {
1265 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1269 if (ev.queue_id == nr_queues - 1) { /* last stage */
1270 rte_pktmbuf_free(ev.mbuf);
1271 rte_atomic32_sub(total_events, 1);
1273 ev.event_type = RTE_EVENT_TYPE_CPU;
1276 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1277 ev.op = RTE_EVENT_OP_FORWARD;
1278 rte_event_enqueue_burst(evdev, port, &ev, 1);
1285 /* Queue based pipeline with the maximum number of stages and random sched types */
1287 test_multi_port_queue_max_stages_random_sched_type(void)
1289 return launch_multi_port_max_stages_random_sched_type(
1290 worker_queue_based_pipeline_max_stages_rand_sched_type);
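/* Forward each event to the next queue, also randomizing sub_event_type and
 * sched type, until the last queue retires it.
 */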
1294 worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
1296 struct test_core_param *param = arg;
1297 uint8_t port = param->port;
1298 uint32_t queue_count;
1299 uint16_t valid_event;
1300 struct rte_event ev;
1302 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1303 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1304 "Queue count get failed");
1305 uint8_t nr_queues = queue_count;
1306 rte_atomic32_t *total_events = param->total_events;
1308 while (rte_atomic32_read(total_events) > 0) {
1309 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1313 if (ev.queue_id == nr_queues - 1) { /* Last stage */
1314 rte_pktmbuf_free(ev.mbuf);
1315 rte_atomic32_sub(total_events, 1);
1317 ev.event_type = RTE_EVENT_TYPE_CPU;
1319 ev.sub_event_type = rte_rand() % 256;
1321 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1322 ev.op = RTE_EVENT_OP_FORWARD;
1323 rte_event_enqueue_burst(evdev, port, &ev, 1);
1330 /* Queue and flow based pipeline with the maximum number of stages and random sched types */
1332 test_multi_port_mixed_max_stages_random_sched_type(void)
1334 return launch_multi_port_max_stages_random_sched_type(
1335 worker_mixed_pipeline_max_stages_rand_sched_type);
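/* Producer: enqueue NUM_PACKETS ordered events on a single flow, tagging
 * each mbuf with its sequence number.
 */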
1339 worker_ordered_flow_producer(void *arg)
1341 struct test_core_param *param = arg;
1342 uint8_t port = param->port;
1346 while (counter < NUM_PACKETS) {
1347 m = rte_pktmbuf_alloc(eventdev_test_mempool);
1351 m->seqn = counter++;
1353 struct rte_event ev = {.event = 0, .u64 = 0};
1355 ev.flow_id = 0x1; /* Generate a fat flow */
1356 ev.sub_event_type = 0;
1357 /* Inject the new event */
1358 ev.op = RTE_EVENT_OP_NEW;
1359 ev.event_type = RTE_EVENT_TYPE_CPU;
1360 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
1363 rte_event_enqueue_burst(evdev, port, &ev, 1);
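/* Run the ordered-flow producer against the given consumer and verify that
 * the ingress order is preserved.
 */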
1370 test_producer_consumer_ingress_order_test(int (*fn)(void *))
1374 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1375 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1376 "Port count get failed");
1377 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1379 if (rte_lcore_count() < 3 || nr_ports < 2) {
1380 otx2_err("### Not enough cores for test.");
1384 launch_workers_and_wait(worker_ordered_flow_producer, fn,
1385 NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
1386 /* Check whether the event order was maintained */
1387 return seqn_list_check(NUM_PACKETS);
1390 /* Flow based producer consumer ingress order test */
1392 test_flow_producer_consumer_ingress_order_test(void)
1394 return test_producer_consumer_ingress_order_test(
1395 worker_flow_based_pipeline);
1398 /* Queue based producer consumer ingress order test */
1400 test_queue_producer_consumer_ingress_order_test(void)
1402 return test_producer_consumer_ingress_order_test(
1403 worker_group_based_pipeline);
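/* Execute setup, the test body and teardown; account the result as passed, failed or unsupported */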
1406 static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
1407 int (*test)(void), const char *name)
1410 printf("Error setting up test %s", name);
1415 printf("+ TestCase [%2d] : %s failed\n", total, name);
1418 printf("+ TestCase [%2d] : %s succeeded\n", total,
1428 otx2_sso_selftest(void)
1432 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1433 test_simple_enqdeq_ordered);
1434 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1435 test_simple_enqdeq_atomic);
1436 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1437 test_simple_enqdeq_parallel);
1438 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1439 test_multi_queue_enq_single_port_deq);
1440 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1441 test_dev_stop_flush);
1442 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1443 test_multi_queue_enq_multi_port_deq);
1444 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1445 test_queue_to_port_single_link);
1446 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1447 test_queue_to_port_multi_link);
1448 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1449 test_multi_port_flow_ordered_to_atomic);
1450 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1451 test_multi_port_flow_ordered_to_ordered);
1452 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1453 test_multi_port_flow_ordered_to_parallel);
1454 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1455 test_multi_port_flow_atomic_to_atomic);
1456 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1457 test_multi_port_flow_atomic_to_ordered);
1458 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1459 test_multi_port_flow_atomic_to_parallel);
1460 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1461 test_multi_port_flow_parallel_to_atomic);
1462 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1463 test_multi_port_flow_parallel_to_ordered);
1464 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1465 test_multi_port_flow_parallel_to_parallel);
1466 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1467 test_multi_port_queue_ordered_to_atomic);
1468 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1469 test_multi_port_queue_ordered_to_ordered);
1470 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1471 test_multi_port_queue_ordered_to_parallel);
1472 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1473 test_multi_port_queue_atomic_to_atomic);
1474 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1475 test_multi_port_queue_atomic_to_ordered);
1476 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1477 test_multi_port_queue_atomic_to_parallel);
1478 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1479 test_multi_port_queue_parallel_to_atomic);
1480 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1481 test_multi_port_queue_parallel_to_ordered);
1482 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1483 test_multi_port_queue_parallel_to_parallel);
1484 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1485 test_multi_port_flow_max_stages_random_sched_type);
1486 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1487 test_multi_port_queue_max_stages_random_sched_type);
1488 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1489 test_multi_port_mixed_max_stages_random_sched_type);
1490 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1491 test_flow_producer_consumer_ingress_order_test);
1492 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1493 test_queue_producer_consumer_ingress_order_test);
1494 OCTEONTX2_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
1495 test_multi_queue_priority);
1496 OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1497 test_multi_port_flow_ordered_to_atomic);
1498 OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1499 test_multi_port_queue_ordered_to_atomic);
1500 printf("Total tests : %d\n", total);
1501 printf("Passed : %d\n", passed);
1502 printf("Failed : %d\n", failed);
1503 printf("Not supported : %d\n", unsupported);
1505 testsuite_teardown();