/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  (16 * 1024)

#define OCTEONTX_TEST_RUN(setup, teardown, test) \
	octeontx_test_run(setup, teardown, test, #test)
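/*
 * OCTEONTX_TEST_RUN() stringizes the test function name with the #
 * operator, so every case is reported under its C identifier.
 * Illustrative expansion (test_foo is a hypothetical test):
 *
 *	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, test_foo);
 *	=> octeontx_test_run(eventdev_setup, eventdev_teardown,
 *			     test_foo, "test_foo");
 */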
static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];
static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return -1;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return 0;
}

static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
			return -1;
		}
	}
	return 0;
}
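/*
 * seqn_list usage sketch: a producer stamps m->seqn = 0, 1, 2, ... on
 * enqueue, consumers call seqn_list_update(ev.mbuf->seqn) once per
 * retired event, and seqn_list_check(n) then asserts seqn_list[i] == i
 * for i < n, i.e. that atomic/ordered scheduling preserved the ingress
 * order end to end.
 */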
struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};
static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		ssovf_log_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			ssovf_log_dbg("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			ssovf_log_dbg("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}
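/*
 * This lets the self-test run without an "event_octeontx" vdev on the
 * EAL command line: when probing finds no device, one is created here
 * at runtime, equivalent to passing the (illustrative) EAL option
 * --vdev="event_octeontx".
 */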
static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}
static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}
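/*
 * A minimal sketch of how this helper is consumed: info is filled by
 * rte_event_dev_info_get() and every resource is sized to the
 * advertised maximum, so the self-test exercises the whole device.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config dev_conf;
 *
 *	rte_event_dev_info_get(evdev, &info);
 *	devconf_set_default_sane_values(&dev_conf, &info);
 *	rte_event_dev_configure(evdev, &dev_conf);
 */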
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy the pool per test case to keep each standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
			MAX_EVENTS,
			0 /*MBUF_CACHE_SIZE*/,
			0,
			512, /* Use very small mbufs */
			rte_socket_id());
	if (!eventdev_test_mempool) {
		ssovf_log_dbg("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			ssovf_log_dbg(
				"test expects a unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}

	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}
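/*
 * Worked example of the priority mode above (assuming the usual
 * 8-queue SSO configuration, which the queue_count > 8 check enforces):
 * RTE_EVENT_DEV_PRIORITY_LOWEST is 255, so step = 256 / 8 = 32 and the
 * queues get priorities 0, 32, 64, ..., 224, i.e. queue 0 is highest
 * priority and queue 7 is lowest.
 */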
static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}
static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}
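/*
 * Note that the attributes are written twice: once into the mbuf data
 * area, which the scheduler cannot alter, and once into the rte_event
 * itself. validate_event() later compares the two copies, so any field
 * the hardware corrupts or remaps shows up as a mismatch.
 */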
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}
static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d", ev.mbuf->seqn);
	}
	return 0;
}
static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			ssovf_log_dbg("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}
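/*
 * consume_events() doubles as a liveness check: forward_progress_cnt is
 * reset on every successful dequeue, so only UINT16_MAX consecutive
 * empty polls (a stalled scheduler) trip the "Detected deadlock" path.
 */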
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
			index, ev->mbuf->seqn);
	return 0;
}

static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
				RTE_EVENT_TYPE_CPU /* event_type */,
				0 /* sub_event_type */,
				sched_type,
				0 /* queue */,
				0 /* port */,
				MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}
/*
 * Inject 0..MAX_EVENTS events over 0..queue_count queues with a modulus
 * operation.
 *
 * For example, inject 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come out in the order
 * 0,8,16,24, 1,9,17,25, ..., 7,15,23,31, i.e. from queue 0 (highest
 * priority) to queue 7 (lowest priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
		ev->mbuf->seqn, index, expected_val, range,
		queue_count, MAX_EVENTS);
	return 0;
}
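/*
 * Worked check of the formula above, using the 32-event/8-queue example
 * from the comment: range = 32 / 8 = 4, so dequeue index 5 (the second
 * event drained from queue 1) expects seqn = (5 % 4) * 8 + 1 = 9, which
 * matches the injected order 0,8,16,24, 1,9,17,25, ...
 */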
static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for the validation logic */
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}
static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}
static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			ssovf_log_dbg("\r%s: events %d", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			ssovf_log_dbg(
				"%s: No schedules for 10 seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();
	return 0;
}
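/*
 * Two timeouts are layered above: progress is logged roughly once per
 * second (rte_get_timer_hz() cycles), and if the worker lcore has not
 * reached FINISHED within ten seconds the scheduler state is dumped via
 * rte_event_dev_dump() and the test is failed as deadlocked.
 */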
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
			int (*slave_workers)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!total_events)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret)
		return -1;

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip master */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}
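/*
 * A sketch of a typical call, using the arguments the tests below pass
 * (0xff is a deliberate "don't care" sched_type for workers such as
 * worker_multi_port_fn that never forward events):
 *
 *	launch_workers_and_wait(worker_multi_port_fn, worker_multi_port_fn,
 *				MAX_EVENTS, nr_ports, 0xff);
 */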
/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by verifying
 * queue_id == x on dequeue on the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;
	uint32_t port_count;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify the events were received from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}
static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the link connections on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &nr_queues), "Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		ssovf_log_dbg("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link even-numbered queues to port 0 and odd-numbered queues to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
				validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}
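/*
 * The `ret == 1` assertions above rely on rte_event_port_link()
 * returning the number of links actually established; linking one queue
 * at a time makes any partial failure immediately attributable to a
 * specific queue/port pair.
 */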
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.sub_event_type = %d",
					ev.sub_event_type);
			return -1;
		}
	}
	return 0;
}
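/*
 * Pipeline summary: sub_event_type doubles as the stage number. Stage 0
 * events are forwarded (RTE_EVENT_OP_FORWARD) into stage 1 with the
 * requested output sched_type; stage 1 events record their ingress
 * sequence number and retire. Forwarding everything into a single
 * atomic flow is what lets seqn_list_check() later prove that order was
 * preserved.
 */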
static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
					worker_flow_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check that the event ordering is maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}
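/*
 * The seqn check is deliberately gated: with a PARALLEL ingress stage
 * there is no ordering guarantee to verify, and with a non-ATOMIC
 * egress stage the workers may interleave their seqn_list_update()
 * calls, so only the {ORDERED, ATOMIC} -> ATOMIC combinations are
 * verifiable.
 */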
/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0(group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
			return -1;
		}
	}

	return 0;
}
static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
			 __func__, queue_count, nr_ports,
			 rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
					worker_group_based_pipeline,
					total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check that the event ordering is maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}
static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}
static inline int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return -1;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
					0xff /* invalid */);
}
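/*
 * Each injected event therefore traverses 256 stages: sub_event_type is
 * an 8-bit stage counter starting at 0, every hop re-rolls the sched
 * type among ATOMIC/ORDERED/PARALLEL, and the event is freed once it
 * reaches stage 255.
 */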
/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}
/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}
/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}
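/*
 * All producer traffic lands in one ORDERED "fat flow" (flow_id 0x1) on
 * queue 0, so a consumer pipeline that forwards it into an atomic stage
 * must reconstruct the exact ingress order; any reordering shows up in
 * the seqn_list_check() performed by the caller below.
 */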
static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	/* Need the master lcore plus at least one producer and one consumer */
	if (rte_lcore_count() < 3 || nr_ports < 2) {
		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
		return 0;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
				NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check that the event ordering is maintained */
	return seqn_list_check(NUM_PACKETS);
}
/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}
static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		ssovf_log_selftest("Error setting up test %s", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			ssovf_log_selftest("%s Failed", name);
		} else {
			passed++;
			ssovf_log_selftest("%s Passed", name);
		}
	}

	total++;
	tdown();
}
int
test_eventdev_octeontx(void)
{
	testsuite_setup();

	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	ssovf_log_selftest("Total tests   : %d", total);
	ssovf_log_selftest("Passed        : %d", passed);
	ssovf_log_selftest("Failed        : %d", failed);
	ssovf_log_selftest("Not supported : %d", unsupported);

	testsuite_teardown();

	if (failed)
		return -1;

	return 0;
}