1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_atomic.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_hexdump.h>
13 #include <rte_launch.h>
14 #include <rte_lcore.h>
16 #include <rte_malloc.h>
17 #include <rte_memcpy.h>
18 #include <rte_per_lcore.h>
19 #include <rte_random.h>
22 #include "otx2_evdev.h"
24 #define NUM_PACKETS (1024)
25 #define MAX_EVENTS (1024)
27 #define OCTEONTX2_TEST_RUN(setup, teardown, test) \
28 octeontx_test_run(setup, teardown, test, #test)
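/*
 * Each test case is run as setup() -> test() -> teardown(); the stringified
 * test name (#test) is passed to octeontx_test_run() for result reporting.
 */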
33 static int unsupported;
36 static struct rte_mempool *eventdev_test_mempool;
41 uint8_t sub_event_type;
47 static uint32_t seqn_list_index;
48 static int seqn_list[NUM_PACKETS];
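/*
 * seqn_list records per-event sequence numbers in dequeue order so that
 * seqn_list_check() can later verify whether ingress ordering was preserved.
 */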
53 RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
54 memset(seqn_list, 0, sizeof(seqn_list));
59 seqn_list_update(int val)
61 if (seqn_list_index >= NUM_PACKETS)
64 seqn_list[seqn_list_index++] = val;
70 seqn_list_check(int limit)
74 for (i = 0; i < limit; i++) {
75 if (seqn_list[i] != i) {
76 otx2_err("Seqn mismatch %d %d", seqn_list[i], i);
83 struct test_core_param {
84 rte_atomic32_t *total_events;
85 uint64_t dequeue_tmo_ticks;
93 const char *eventdev_name = "event_octeontx2";
95 evdev = rte_event_dev_get_dev_id(eventdev_name);
97 otx2_err("%d: Eventdev %s not found", __LINE__, eventdev_name);
104 testsuite_teardown(void)
106 rte_event_dev_close(evdev);
110 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
111 struct rte_event_dev_info *info)
113 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
114 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
115 dev_conf->nb_event_ports = info->max_event_ports;
116 dev_conf->nb_event_queues = info->max_event_queues;
117 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
118 dev_conf->nb_event_port_dequeue_depth =
119 info->max_event_port_dequeue_depth;
120 dev_conf->nb_event_port_enqueue_depth =
121 info->max_event_port_enqueue_depth;
124 dev_conf->nb_events_limit =
125 info->max_num_events;
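/*
 * Note: the device is configured with the maximum resources advertised in
 * rte_event_dev_info so that every queue and port is available to the tests.
 */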
129 TEST_EVENTDEV_SETUP_DEFAULT,
130 TEST_EVENTDEV_SETUP_PRIORITY,
131 TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
135 _eventdev_setup(int mode)
137 const char *pool_name = "evdev_octeontx_test_pool";
138 struct rte_event_dev_config dev_conf;
139 struct rte_event_dev_info info;
142 /* Create and destroy pool for each test case to make it standalone */
143 eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
146 if (!eventdev_test_mempool) {
147 otx2_err("ERROR creating mempool");
151 ret = rte_event_dev_info_get(evdev, &info);
152 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
154 devconf_set_default_sane_values(&dev_conf, &info);
155 if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
156 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
158 ret = rte_event_dev_configure(evdev, &dev_conf);
159 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
161 uint32_t queue_count;
162 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
163 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
164 "Queue count get failed");
166 if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
170 /* Configure event queues (0 to n) with
171 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
172 * RTE_EVENT_DEV_PRIORITY_LOWEST
174 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
176 for (i = 0; i < (int)queue_count; i++) {
177 struct rte_event_queue_conf queue_conf;
179 ret = rte_event_queue_default_conf_get(evdev, i,
181 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
183 queue_conf.priority = i * step;
184 ret = rte_event_queue_setup(evdev, i, &queue_conf);
185 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
190 /* Configure event queues with default priority */
191 for (i = 0; i < (int)queue_count; i++) {
192 ret = rte_event_queue_setup(evdev, i, NULL);
193 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
197 /* Configure event ports */
199 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
200 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
201 "Port count get failed");
202 for (i = 0; i < (int)port_count; i++) {
203 ret = rte_event_port_setup(evdev, i, NULL);
204 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
205 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
206 RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
210 ret = rte_event_dev_start(evdev);
211 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
219 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
223 eventdev_setup_priority(void)
225 return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
229 eventdev_setup_dequeue_timeout(void)
231 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
235 eventdev_teardown(void)
237 rte_event_dev_stop(evdev);
238 rte_mempool_free(eventdev_test_mempool);
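/*
 * Write the same attributes both into the event and into the mbuf data area
 * so that validate_event() can compare the two copies on dequeue.
 */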
242 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
243 uint32_t flow_id, uint8_t event_type,
244 uint8_t sub_event_type, uint8_t sched_type,
245 uint8_t queue, uint8_t port)
247 struct event_attr *attr;
249 /* Store the event attributes in mbuf for future reference */
250 attr = rte_pktmbuf_mtod(m, struct event_attr *);
251 attr->flow_id = flow_id;
252 attr->event_type = event_type;
253 attr->sub_event_type = sub_event_type;
254 attr->sched_type = sched_type;
258 ev->flow_id = flow_id;
259 ev->sub_event_type = sub_event_type;
260 ev->event_type = event_type;
261 /* Inject the new event */
262 ev->op = RTE_EVENT_OP_NEW;
263 ev->sched_type = sched_type;
264 ev->queue_id = queue;
269 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
270 uint8_t sched_type, uint8_t queue, uint8_t port,
276 for (i = 0; i < events; i++) {
277 struct rte_event ev = {.event = 0, .u64 = 0};
279 m = rte_pktmbuf_alloc(eventdev_test_mempool);
280 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
282 *rte_event_pmd_selftest_seqn(m) = i;
283 update_event_and_validation_attr(m, &ev, flow_id, event_type,
284 sub_event_type, sched_type,
286 rte_event_enqueue_burst(evdev, port, &ev, 1);
292 check_excess_events(uint8_t port)
294 uint16_t valid_event;
298 /* Check for excess events; try a few times and then exit */
299 for (i = 0; i < 32; i++) {
300 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
302 RTE_TEST_ASSERT_SUCCESS(valid_event,
303 "Unexpected valid event=%d",
304 *rte_event_pmd_selftest_seqn(ev.mbuf));
310 generate_random_events(const unsigned int total_events)
312 struct rte_event_dev_info info;
313 uint32_t queue_count;
317 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
318 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
319 "Queue count get failed");
321 ret = rte_event_dev_info_get(evdev, &info);
322 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
323 for (i = 0; i < total_events; i++) {
325 rte_rand() % info.max_event_queue_flows /*flow_id */,
326 RTE_EVENT_TYPE_CPU /* event_type */,
327 rte_rand() % 256 /* sub_event_type */,
328 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
329 rte_rand() % queue_count /* queue */,
340 validate_event(struct rte_event *ev)
342 struct event_attr *attr;
344 attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
345 RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
346 "flow_id mismatch enq=%d deq =%d",
347 attr->flow_id, ev->flow_id);
348 RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
349 "event_type mismatch enq=%d deq =%d",
350 attr->event_type, ev->event_type);
351 RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
352 "sub_event_type mismatch enq=%d deq =%d",
353 attr->sub_event_type, ev->sub_event_type);
354 RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
355 "sched_type mismatch enq=%d deq =%d",
356 attr->sched_type, ev->sched_type);
357 RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
358 "queue mismatch enq=%d deq =%d",
359 attr->queue, ev->queue_id);
363 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
364 struct rte_event *ev);
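/*
 * Dequeue total_events from the given port, run the generic and the test
 * specific validation on each event, then make sure no stale events remain.
 */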
367 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
369 uint32_t events = 0, forward_progress_cnt = 0, index = 0;
370 uint16_t valid_event;
375 if (++forward_progress_cnt > UINT16_MAX) {
376 otx2_err("Detected deadlock");
380 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
384 forward_progress_cnt = 0;
385 ret = validate_event(&ev);
390 ret = fn(index, port, &ev);
391 RTE_TEST_ASSERT_SUCCESS(ret,
392 "Failed to validate test specific event");
397 rte_pktmbuf_free(ev.mbuf);
398 if (++events >= total_events)
402 return check_excess_events(port);
406 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
409 RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
410 "index=%d != seqn=%d",
411 index, *rte_event_pmd_selftest_seqn(ev->mbuf));
416 test_simple_enqdeq(uint8_t sched_type)
420 ret = inject_events(0 /*flow_id */,
421 RTE_EVENT_TYPE_CPU /* event_type */,
422 0 /* sub_event_type */,
430 return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
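/*
 * Simple enq/deq: MAX_EVENTS events with sequence numbers 0..MAX_EVENTS-1 are
 * injected through port 0 and are expected back in the same order for each
 * scheduling type exercised below.
 */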
434 test_simple_enqdeq_ordered(void)
436 return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
440 test_simple_enqdeq_atomic(void)
442 return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
446 test_simple_enqdeq_parallel(void)
448 return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
452 * Generate a prescribed number of events and spread them across available
453 * queues. On dequeue, using a single event port (port 0), verify the enqueued
457 test_multi_queue_enq_single_port_deq(void)
461 ret = generate_random_events(MAX_EVENTS);
465 return consume_events(0 /* port */, MAX_EVENTS, NULL);
469 * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
472 * For example, inject 32 events over queues 0..7
473 * enqueue events 0, 8, 16, 24 in queue 0
474 * enqueue events 1, 9, 17, 25 in queue 1
477 * enqueue events 7, 15, 23, 31 in queue 7
479 * On dequeue, validate that the events arrive in 0,8,16,24,1,9,17,25..,7,15,23,31
480 * order, from queue 0 (highest priority) to queue 7 (lowest priority)
483 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
485 uint32_t queue_count;
487 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
488 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
489 "Queue count get failed");
492 uint32_t range = MAX_EVENTS / queue_count;
493 uint32_t expected_val = (index % range) * queue_count;
495 expected_val += ev->queue_id;
497 RTE_TEST_ASSERT_EQUAL(
498 *rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
499 "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
500 *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,
501 range, queue_count, MAX_EVENTS);
506 test_multi_queue_priority(void)
508 int i, max_evts_roundoff;
509 /* See validate_queue_priority() comments for the priority validation logic */
510 uint32_t queue_count;
514 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
515 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
516 "Queue count get failed");
519 max_evts_roundoff = MAX_EVENTS / queue_count;
520 max_evts_roundoff *= queue_count;
522 for (i = 0; i < max_evts_roundoff; i++) {
523 struct rte_event ev = {.event = 0, .u64 = 0};
525 m = rte_pktmbuf_alloc(eventdev_test_mempool);
526 RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
528 *rte_event_pmd_selftest_seqn(m) = i;
529 queue = i % queue_count;
530 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
531 0, RTE_SCHED_TYPE_PARALLEL,
533 rte_event_enqueue_burst(evdev, 0, &ev, 1);
536 return consume_events(0, max_evts_roundoff, validate_queue_priority);
540 worker_multi_port_fn(void *arg)
542 struct test_core_param *param = arg;
543 rte_atomic32_t *total_events = param->total_events;
544 uint8_t port = param->port;
545 uint16_t valid_event;
549 while (rte_atomic32_read(total_events) > 0) {
550 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
554 ret = validate_event(&ev);
555 RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
556 rte_pktmbuf_free(ev.mbuf);
557 rte_atomic32_sub(total_events, 1);
564 wait_workers_to_join(const rte_atomic32_t *count)
566 uint64_t cycles, print_cycles;
568 cycles = rte_get_timer_cycles();
569 print_cycles = cycles;
570 while (rte_atomic32_read(count)) {
571 uint64_t new_cycles = rte_get_timer_cycles();
573 if (new_cycles - print_cycles > rte_get_timer_hz()) {
574 otx2_err("Events %d", rte_atomic32_read(count));
575 print_cycles = new_cycles;
577 if (new_cycles - cycles > rte_get_timer_hz() * 10) {
578 otx2_err("No schedules for 10 seconds, deadlock (%d)",
579 rte_atomic32_read(count));
580 rte_event_dev_dump(evdev, stdout);
585 rte_eal_mp_wait_lcore();
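/*
 * Launch main_thread on the first worker lcore and worker_thread on the
 * remaining lcores (ports 1..nb_workers-1), then wait until total_events
 * has drained to zero.
 */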
591 launch_workers_and_wait(int (*main_thread)(void *),
592 int (*worker_thread)(void *), uint32_t total_events,
593 uint8_t nb_workers, uint8_t sched_type)
595 rte_atomic32_t atomic_total_events;
596 struct test_core_param *param;
597 uint64_t dequeue_tmo_ticks;
605 rte_atomic32_set(&atomic_total_events, total_events);
608 param = malloc(sizeof(struct test_core_param) * nb_workers);
612 ret = rte_event_dequeue_timeout_ticks(evdev,
613 rte_rand() % 10000000 /* 10ms */,
620 param[0].total_events = &atomic_total_events;
621 param[0].sched_type = sched_type;
623 param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
626 w_lcore = rte_get_next_lcore(
630 rte_eal_remote_launch(main_thread, &param[0], w_lcore);
632 for (port = 1; port < nb_workers; port++) {
633 param[port].total_events = &atomic_total_events;
634 param[port].sched_type = sched_type;
635 param[port].port = port;
636 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
638 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
639 rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
643 ret = wait_workers_to_join(&atomic_total_events);
650 * Generate a prescribed number of events and spread them across available
651 * queues. Dequeue the events through multiple ports and verify the enqueued
655 test_multi_queue_enq_multi_port_deq(void)
657 const unsigned int total_events = MAX_EVENTS;
661 ret = generate_random_events(total_events);
665 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
666 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
667 "Port count get failed");
668 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
671 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
672 rte_lcore_count() - 1);
676 return launch_workers_and_wait(worker_multi_port_fn,
677 worker_multi_port_fn, total_events,
678 nr_ports, 0xff /* invalid */);
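/*
 * Device stop flush test: a flush callback is registered, random events are
 * left in the device, and rte_event_dev_stop() is expected to hand every
 * residual RTE_EVENT_TYPE_CPU event to the callback so that count matches
 * total_events.
 */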
682 void flush(uint8_t dev_id, struct rte_event event, void *arg)
684 unsigned int *count = arg;
686 RTE_SET_USED(dev_id);
687 if (event.event_type == RTE_EVENT_TYPE_CPU)
692 test_dev_stop_flush(void)
694 unsigned int total_events = MAX_EVENTS, count = 0;
697 ret = generate_random_events(total_events);
701 ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
704 rte_event_dev_stop(evdev);
705 ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
708 RTE_TEST_ASSERT_EQUAL(total_events, count,
709 "count mismatch total_events=%d count=%d",
710 total_events, count);
716 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
717 struct rte_event *ev)
720 RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
721 "queue mismatch enq=%d deq =%d",
728 * Link queue x to port x and check the correctness of the link by checking
729 * queue_id == x on dequeue on the specific port x
732 test_queue_to_port_single_link(void)
734 int i, nr_links, ret;
735 uint32_t queue_count;
738 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
739 RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
740 "Port count get failed");
742 /* Unlink all connections that were created in eventdev_setup */
743 for (i = 0; i < (int)port_count; i++) {
744 ret = rte_event_port_unlink(evdev, i, NULL, 0);
745 RTE_TEST_ASSERT(ret >= 0,
746 "Failed to unlink all queues port=%d", i);
749 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
750 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
751 "Queue count get failed");
753 nr_links = RTE_MIN(port_count, queue_count);
754 const unsigned int total_events = MAX_EVENTS / nr_links;
756 /* Link queue x to port x and inject events to queue x through port x */
757 for (i = 0; i < nr_links; i++) {
758 uint8_t queue = (uint8_t)i;
760 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
761 RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
763 ret = inject_events(0x100 /*flow_id */,
764 RTE_EVENT_TYPE_CPU /* event_type */,
765 rte_rand() % 256 /* sub_event_type */,
766 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
767 queue /* queue */, i /* port */,
768 total_events /* events */);
773 /* Verify that the events were generated from the correct queue */
774 for (i = 0; i < nr_links; i++) {
775 ret = consume_events(i /* port */, total_events,
776 validate_queue_to_port_single_link);
785 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
786 struct rte_event *ev)
789 RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
790 "queue mismatch enq=%d deq =%d",
797 * Link all even-numbered queues to port 0 and all odd-numbered queues to
798 * port 1, then verify the link connections on dequeue
801 test_queue_to_port_multi_link(void)
803 int ret, port0_events = 0, port1_events = 0;
804 uint32_t nr_queues = 0;
805 uint32_t nr_ports = 0;
808 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
809 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),
810 "Queue count get failed");
814 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
815 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
816 "Port count get failed");
819 otx2_err("Not enough ports to test ports=%d", nr_ports);
823 /* Unlink all connections that were created in eventdev_setup */
824 for (port = 0; port < nr_ports; port++) {
825 ret = rte_event_port_unlink(evdev, port, NULL, 0);
826 RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
830 const unsigned int total_events = MAX_EVENTS / nr_queues;
832 /* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
833 for (queue = 0; queue < nr_queues; queue++) {
835 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
836 RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
839 ret = inject_events(0x100 /*flow_id */,
840 RTE_EVENT_TYPE_CPU /* event_type */,
841 rte_rand() % 256 /* sub_event_type */,
842 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
843 queue /* queue */, port /* port */,
844 total_events /* events */);
849 port0_events += total_events;
851 port1_events += total_events;
854 ret = consume_events(0 /* port */, port0_events,
855 validate_queue_to_port_multi_link);
858 ret = consume_events(1 /* port */, port1_events,
859 validate_queue_to_port_multi_link);
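/*
 * Two stage, flow based pipeline: the stage is encoded in sub_event_type.
 * Stage 0 events are forwarded to stage 1 with the requested sched type;
 * stage 1 events are recorded in seqn_list, freed and accounted.
 */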
867 worker_flow_based_pipeline(void *arg)
869 struct test_core_param *param = arg;
870 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
871 rte_atomic32_t *total_events = param->total_events;
872 uint8_t new_sched_type = param->sched_type;
873 uint8_t port = param->port;
874 uint16_t valid_event;
877 while (rte_atomic32_read(total_events) > 0) {
878 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
883 /* Events from stage 0 */
884 if (ev.sub_event_type == 0) {
885 /* Move to atomic flow to maintain the ordering */
887 ev.event_type = RTE_EVENT_TYPE_CPU;
888 ev.sub_event_type = 1; /* stage 1 */
889 ev.sched_type = new_sched_type;
890 ev.op = RTE_EVENT_OP_FORWARD;
891 rte_event_enqueue_burst(evdev, port, &ev, 1);
892 } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
893 uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
895 if (seqn_list_update(seqn) == 0) {
896 rte_pktmbuf_free(ev.mbuf);
897 rte_atomic32_sub(total_events, 1);
899 otx2_err("Failed to update seqn_list");
903 otx2_err("Invalid ev.sub_event_type = %d",
912 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
913 uint8_t out_sched_type)
915 const unsigned int total_events = MAX_EVENTS;
919 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
920 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
921 "Port count get failed");
922 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
925 otx2_err("Not enough ports=%d or workers=%d", nr_ports,
926 rte_lcore_count() - 1);
930 /* Inject events with sequence numbers 0..total_events - 1 */
931 ret = inject_events(0x1 /*flow_id */,
932 RTE_EVENT_TYPE_CPU /* event_type */,
933 0 /* sub_event_type (stage 0) */,
937 total_events /* events */);
942 ret = launch_workers_and_wait(worker_flow_based_pipeline,
943 worker_flow_based_pipeline, total_events,
944 nr_ports, out_sched_type);
948 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
949 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
950 /* Check whether event ordering was maintained */
951 return seqn_list_check(total_events);
957 /* Multi port ordered to atomic transaction */
959 test_multi_port_flow_ordered_to_atomic(void)
961 /* Ingress event order test */
962 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
963 RTE_SCHED_TYPE_ATOMIC);
967 test_multi_port_flow_ordered_to_ordered(void)
969 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
970 RTE_SCHED_TYPE_ORDERED);
974 test_multi_port_flow_ordered_to_parallel(void)
976 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
977 RTE_SCHED_TYPE_PARALLEL);
981 test_multi_port_flow_atomic_to_atomic(void)
983 /* Ingress event order test */
984 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
985 RTE_SCHED_TYPE_ATOMIC);
989 test_multi_port_flow_atomic_to_ordered(void)
991 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
992 RTE_SCHED_TYPE_ORDERED);
996 test_multi_port_flow_atomic_to_parallel(void)
998 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
999 RTE_SCHED_TYPE_PARALLEL);
1003 test_multi_port_flow_parallel_to_atomic(void)
1005 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1006 RTE_SCHED_TYPE_ATOMIC);
1010 test_multi_port_flow_parallel_to_ordered(void)
1012 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1013 RTE_SCHED_TYPE_ORDERED);
1017 test_multi_port_flow_parallel_to_parallel(void)
1019 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1020 RTE_SCHED_TYPE_PARALLEL);
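/*
 * Two stage, queue (group) based pipeline: identical to the flow based
 * worker above, except that the stage is encoded in queue_id instead of
 * sub_event_type.
 */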
1024 worker_group_based_pipeline(void *arg)
1026 struct test_core_param *param = arg;
1027 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
1028 rte_atomic32_t *total_events = param->total_events;
1029 uint8_t new_sched_type = param->sched_type;
1030 uint8_t port = param->port;
1031 uint16_t valid_event;
1032 struct rte_event ev;
1034 while (rte_atomic32_read(total_events) > 0) {
1035 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
1040 /* Events from stage 0 (group 0) */
1041 if (ev.queue_id == 0) {
1042 /* Move to atomic flow to maintain the ordering */
1044 ev.event_type = RTE_EVENT_TYPE_CPU;
1045 ev.sched_type = new_sched_type;
1046 ev.queue_id = 1; /* Stage 1*/
1047 ev.op = RTE_EVENT_OP_FORWARD;
1048 rte_event_enqueue_burst(evdev, port, &ev, 1);
1049 } else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
1050 uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
1052 if (seqn_list_update(seqn) == 0) {
1053 rte_pktmbuf_free(ev.mbuf);
1054 rte_atomic32_sub(total_events, 1);
1056 otx2_err("Failed to update seqn_list");
1060 otx2_err("Invalid ev.queue_id = %d", ev.queue_id);
1069 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
1070 uint8_t out_sched_type)
1072 const unsigned int total_events = MAX_EVENTS;
1073 uint32_t queue_count;
1077 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1078 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1079 "Port count get failed");
1081 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1083 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1084 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1085 "Queue count get failed");
1086 if (queue_count < 2 || !nr_ports) {
1087 otx2_err("Not enough queues=%d ports=%d or workers=%d",
1088 queue_count, nr_ports,
1089 rte_lcore_count() - 1);
1093 /* Inject events with sequence numbers 0..total_events - 1 */
1094 ret = inject_events(0x1 /*flow_id */,
1095 RTE_EVENT_TYPE_CPU /* event_type */,
1096 0 /* sub_event_type (stage 0) */,
1100 total_events /* events */);
1104 ret = launch_workers_and_wait(worker_group_based_pipeline,
1105 worker_group_based_pipeline, total_events,
1106 nr_ports, out_sched_type);
1110 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1111 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
1112 /* Check whether event ordering was maintained */
1113 return seqn_list_check(total_events);
1120 test_multi_port_queue_ordered_to_atomic(void)
1122 /* Ingress event order test */
1123 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1124 RTE_SCHED_TYPE_ATOMIC);
1128 test_multi_port_queue_ordered_to_ordered(void)
1130 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1131 RTE_SCHED_TYPE_ORDERED);
1135 test_multi_port_queue_ordered_to_parallel(void)
1137 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1138 RTE_SCHED_TYPE_PARALLEL);
1142 test_multi_port_queue_atomic_to_atomic(void)
1144 /* Ingress event order test */
1145 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1146 RTE_SCHED_TYPE_ATOMIC);
1150 test_multi_port_queue_atomic_to_ordered(void)
1152 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1153 RTE_SCHED_TYPE_ORDERED);
1157 test_multi_port_queue_atomic_to_parallel(void)
1159 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1160 RTE_SCHED_TYPE_PARALLEL);
1164 test_multi_port_queue_parallel_to_atomic(void)
1166 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1167 RTE_SCHED_TYPE_ATOMIC);
1171 test_multi_port_queue_parallel_to_ordered(void)
1173 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1174 RTE_SCHED_TYPE_ORDERED);
1178 test_multi_port_queue_parallel_to_parallel(void)
1180 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1181 RTE_SCHED_TYPE_PARALLEL);
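/*
 * Max stages workers: every event is forwarded hop by hop with a random
 * sched type at each hop. The flow based variant uses sub_event_type
 * 0..255 as the stage counter; the queue based variants walk through all
 * configured queues before the event is freed.
 */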
1185 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1187 struct test_core_param *param = arg;
1188 rte_atomic32_t *total_events = param->total_events;
1189 uint8_t port = param->port;
1190 uint16_t valid_event;
1191 struct rte_event ev;
1193 while (rte_atomic32_read(total_events) > 0) {
1194 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1198 if (ev.sub_event_type == 255) { /* last stage */
1199 rte_pktmbuf_free(ev.mbuf);
1200 rte_atomic32_sub(total_events, 1);
1202 ev.event_type = RTE_EVENT_TYPE_CPU;
1203 ev.sub_event_type++;
1205 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1206 ev.op = RTE_EVENT_OP_FORWARD;
1207 rte_event_enqueue_burst(evdev, port, &ev, 1);
1215 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1220 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1221 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1222 "Port count get failed");
1223 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1226 otx2_err("Not enough ports=%d or workers=%d",
1227 nr_ports, rte_lcore_count() - 1);
1231 /* Inject events with sequence numbers 0..MAX_EVENTS - 1 */
1232 ret = inject_events(0x1 /*flow_id */,
1233 RTE_EVENT_TYPE_CPU /* event_type */,
1234 0 /* sub_event_type (stage 0) */,
1236 (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1239 MAX_EVENTS /* events */);
1243 return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1244 0xff /* invalid */);
1247 /* Flow based pipeline with maximum stages and random sched type */
1249 test_multi_port_flow_max_stages_random_sched_type(void)
1251 return launch_multi_port_max_stages_random_sched_type(
1252 worker_flow_based_pipeline_max_stages_rand_sched_type);
1256 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1258 struct test_core_param *param = arg;
1259 uint8_t port = param->port;
1260 uint32_t queue_count;
1261 uint16_t valid_event;
1262 struct rte_event ev;
1264 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1265 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1266 "Queue count get failed");
1267 uint8_t nr_queues = queue_count;
1268 rte_atomic32_t *total_events = param->total_events;
1270 while (rte_atomic32_read(total_events) > 0) {
1271 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1275 if (ev.queue_id == nr_queues - 1) { /* last stage */
1276 rte_pktmbuf_free(ev.mbuf);
1277 rte_atomic32_sub(total_events, 1);
1279 ev.event_type = RTE_EVENT_TYPE_CPU;
1282 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1283 ev.op = RTE_EVENT_OP_FORWARD;
1284 rte_event_enqueue_burst(evdev, port, &ev, 1);
1291 /* Queue based pipeline with maximum stages and random sched type */
1293 test_multi_port_queue_max_stages_random_sched_type(void)
1295 return launch_multi_port_max_stages_random_sched_type(
1296 worker_queue_based_pipeline_max_stages_rand_sched_type);
1300 worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
1302 struct test_core_param *param = arg;
1303 uint8_t port = param->port;
1304 uint32_t queue_count;
1305 uint16_t valid_event;
1306 struct rte_event ev;
1308 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1309 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1310 "Queue count get failed");
1311 uint8_t nr_queues = queue_count;
1312 rte_atomic32_t *total_events = param->total_events;
1314 while (rte_atomic32_read(total_events) > 0) {
1315 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1319 if (ev.queue_id == nr_queues - 1) { /* Last stage */
1320 rte_pktmbuf_free(ev.mbuf);
1321 rte_atomic32_sub(total_events, 1);
1323 ev.event_type = RTE_EVENT_TYPE_CPU;
1325 ev.sub_event_type = rte_rand() % 256;
1327 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1328 ev.op = RTE_EVENT_OP_FORWARD;
1329 rte_event_enqueue_burst(evdev, port, &ev, 1);
1336 /* Queue and flow based pipeline with maximum stages and random sched type */
1338 test_multi_port_mixed_max_stages_random_sched_type(void)
1340 return launch_multi_port_max_stages_random_sched_type(
1341 worker_mixed_pipeline_max_stages_rand_sched_type);
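/*
 * Ingress order test: one lcore produces NUM_PACKETS events on a single
 * ordered flow while the remaining lcores forward them through an atomic
 * stage; seqn_list_check() then verifies that ingress order was preserved.
 */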
1345 worker_ordered_flow_producer(void *arg)
1347 struct test_core_param *param = arg;
1348 uint8_t port = param->port;
1352 while (counter < NUM_PACKETS) {
1353 m = rte_pktmbuf_alloc(eventdev_test_mempool);
1357 *rte_event_pmd_selftest_seqn(m) = counter++;
1359 struct rte_event ev = {.event = 0, .u64 = 0};
1361 ev.flow_id = 0x1; /* Generate a fat flow */
1362 ev.sub_event_type = 0;
1363 /* Inject the new event */
1364 ev.op = RTE_EVENT_OP_NEW;
1365 ev.event_type = RTE_EVENT_TYPE_CPU;
1366 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
1369 rte_event_enqueue_burst(evdev, port, &ev, 1);
1376 test_producer_consumer_ingress_order_test(int (*fn)(void *))
1380 RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1381 RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
1382 "Port count get failed");
1383 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1385 if (rte_lcore_count() < 3 || nr_ports < 2) {
1386 otx2_err("### Not enough cores for test.");
1390 launch_workers_and_wait(worker_ordered_flow_producer, fn,
1391 NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
1392 /* Check whether event ordering was maintained */
1393 return seqn_list_check(NUM_PACKETS);
1396 /* Flow based producer consumer ingress order test */
1398 test_flow_producer_consumer_ingress_order_test(void)
1400 return test_producer_consumer_ingress_order_test(
1401 worker_flow_based_pipeline);
1404 /* Queue based producer consumer ingress order test */
1406 test_queue_producer_consumer_ingress_order_test(void)
1408 return test_producer_consumer_ingress_order_test(
1409 worker_group_based_pipeline);
1412 static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
1413 int (*test)(void), const char *name)
1416 printf("Error setting up test %s", name);
1421 printf("+ TestCase [%2d] : %s failed\n", total, name);
1424 printf("+ TestCase [%2d] : %s succeeded\n", total,
1434 otx2_sso_selftest(void)
1438 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1439 test_simple_enqdeq_ordered);
1440 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1441 test_simple_enqdeq_atomic);
1442 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1443 test_simple_enqdeq_parallel);
1444 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1445 test_multi_queue_enq_single_port_deq);
1446 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1447 test_dev_stop_flush);
1448 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1449 test_multi_queue_enq_multi_port_deq);
1450 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1451 test_queue_to_port_single_link);
1452 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1453 test_queue_to_port_multi_link);
1454 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1455 test_multi_port_flow_ordered_to_atomic);
1456 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1457 test_multi_port_flow_ordered_to_ordered);
1458 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1459 test_multi_port_flow_ordered_to_parallel);
1460 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1461 test_multi_port_flow_atomic_to_atomic);
1462 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1463 test_multi_port_flow_atomic_to_ordered);
1464 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1465 test_multi_port_flow_atomic_to_parallel);
1466 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1467 test_multi_port_flow_parallel_to_atomic);
1468 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1469 test_multi_port_flow_parallel_to_ordered);
1470 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1471 test_multi_port_flow_parallel_to_parallel);
1472 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1473 test_multi_port_queue_ordered_to_atomic);
1474 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1475 test_multi_port_queue_ordered_to_ordered);
1476 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1477 test_multi_port_queue_ordered_to_parallel);
1478 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1479 test_multi_port_queue_atomic_to_atomic);
1480 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1481 test_multi_port_queue_atomic_to_ordered);
1482 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1483 test_multi_port_queue_atomic_to_parallel);
1484 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1485 test_multi_port_queue_parallel_to_atomic);
1486 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1487 test_multi_port_queue_parallel_to_ordered);
1488 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1489 test_multi_port_queue_parallel_to_parallel);
1490 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1491 test_multi_port_flow_max_stages_random_sched_type);
1492 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1493 test_multi_port_queue_max_stages_random_sched_type);
1494 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1495 test_multi_port_mixed_max_stages_random_sched_type);
1496 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1497 test_flow_producer_consumer_ingress_order_test);
1498 OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
1499 test_queue_producer_consumer_ingress_order_test);
1500 OCTEONTX2_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
1501 test_multi_queue_priority);
1502 OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1503 test_multi_port_flow_ordered_to_atomic);
1504 OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
1505 test_multi_port_queue_ordered_to_atomic);
1506 printf("Total tests : %d\n", total);
1507 printf("Passed : %d\n", passed);
1508 printf("Failed : %d\n", failed);
1509 printf("Not supported : %d\n", unsupported);
1511 testsuite_teardown();