1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #include <rte_atomic.h>
6 #include <rte_common.h>
7 #include <rte_cycles.h>
10 #include <rte_ethdev.h>
11 #include <rte_eventdev.h>
12 #include <rte_hexdump.h>
14 #include <rte_malloc.h>
15 #include <rte_memcpy.h>
16 #include <rte_launch.h>
17 #include <rte_lcore.h>
18 #include <rte_per_lcore.h>
19 #include <rte_random.h>
20 #include <rte_bus_vdev.h>
24 #define NUM_PACKETS (1 << 18)
25 #define MAX_EVENTS (16 * 1024)
28 static struct rte_mempool *eventdev_test_mempool;
33 uint8_t sub_event_type;
39 static uint32_t seqn_list_index;
40 static int seqn_list[NUM_PACKETS];
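/*
* seqn_list records the sequence number of each completed event;
* seqn_list_check() later verifies that they were observed in ingress order.
*/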
45 RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
46 memset(seqn_list, 0, sizeof(seqn_list));
51 seqn_list_update(int val)
53 if (seqn_list_index >= NUM_PACKETS)
56 seqn_list[seqn_list_index++] = val;
62 seqn_list_check(int limit)
66 for (i = 0; i < limit; i++) {
67 if (seqn_list[i] != i) {
68 printf("Seqn mismatch %d %d\n", seqn_list[i], i);
75 struct test_core_param {
76 rte_atomic32_t *total_events;
77 uint64_t dequeue_tmo_ticks;
85 const char *eventdev_name = "event_octeontx";
87 evdev = rte_event_dev_get_dev_id(eventdev_name);
89 printf("%d: Eventdev %s not found - creating.\n",
90 __LINE__, eventdev_name);
91 if (rte_vdev_init(eventdev_name, NULL) < 0) {
92 printf("Error creating eventdev %s\n", eventdev_name);
95 evdev = rte_event_dev_get_dev_id(eventdev_name);
97 printf("Error finding newly created eventdev\n");
106 testsuite_teardown(void)
108 rte_event_dev_close(evdev);
112 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
113 struct rte_event_dev_info *info)
115 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
116 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
117 dev_conf->nb_event_ports = info->max_event_ports;
118 dev_conf->nb_event_queues = info->max_event_queues;
119 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
120 dev_conf->nb_event_port_dequeue_depth =
121 info->max_event_port_dequeue_depth;
122 dev_conf->nb_event_port_enqueue_depth =
123 info->max_event_port_enqueue_depth;
126 dev_conf->nb_events_limit =
127 info->max_num_events;
131 TEST_EVENTDEV_SETUP_DEFAULT,
132 TEST_EVENTDEV_SETUP_PRIORITY,
133 TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
137 _eventdev_setup(int mode)
140 struct rte_event_dev_config dev_conf;
141 struct rte_event_dev_info info;
142 const char *pool_name = "evdev_octeontx_test_pool";
/* Create and destroy the pool for each test case to keep it standalone */
145 eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
147 0 /*MBUF_CACHE_SIZE*/,
149 512, /* Use very small mbufs */
151 if (!eventdev_test_mempool) {
152 printf("ERROR creating mempool\n");
156 ret = rte_event_dev_info_get(evdev, &info);
157 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
158 TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
159 "max_num_events=%d < max_events=%d",
160 info.max_num_events, MAX_EVENTS);
162 devconf_set_default_sane_values(&dev_conf, &info);
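/*
* In DEQUEUE_TIMEOUT mode, RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT makes the
* device honour the timeout_ticks argument of each rte_event_dequeue_burst()
* call instead of the global dequeue_timeout_ns configured above.
*/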
163 if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
164 dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
166 ret = rte_event_dev_configure(evdev, &dev_conf);
167 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
169 uint32_t queue_count;
170 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
171 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
172 &queue_count), "Queue count get failed");
174 if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
175 if (queue_count > 8) {
176 printf("test expects the unique priority per queue\n");
/* Configure event queues (0 to n) with
181 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
182 * RTE_EVENT_DEV_PRIORITY_LOWEST
184 uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
186 for (i = 0; i < (int)queue_count; i++) {
187 struct rte_event_queue_conf queue_conf;
189 ret = rte_event_queue_default_conf_get(evdev, i,
191 TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
192 queue_conf.priority = i * step;
193 ret = rte_event_queue_setup(evdev, i, &queue_conf);
194 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
198 /* Configure event queues with default priority */
199 for (i = 0; i < (int)queue_count; i++) {
200 ret = rte_event_queue_setup(evdev, i, NULL);
201 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
204 /* Configure event ports */
206 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
207 RTE_EVENT_DEV_ATTR_PORT_COUNT,
208 &port_count), "Port count get failed");
209 for (i = 0; i < (int)port_count; i++) {
210 ret = rte_event_port_setup(evdev, i, NULL);
211 TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
212 ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
213 TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
216 ret = rte_event_dev_start(evdev);
217 TEST_ASSERT_SUCCESS(ret, "Failed to start device");
225 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
229 eventdev_setup_priority(void)
231 return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
235 eventdev_setup_dequeue_timeout(void)
237 return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
241 eventdev_teardown(void)
243 rte_event_dev_stop(evdev);
244 rte_mempool_free(eventdev_test_mempool);
248 update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
249 uint32_t flow_id, uint8_t event_type,
250 uint8_t sub_event_type, uint8_t sched_type,
251 uint8_t queue, uint8_t port)
253 struct event_attr *attr;
255 /* Store the event attributes in mbuf for future reference */
256 attr = rte_pktmbuf_mtod(m, struct event_attr *);
257 attr->flow_id = flow_id;
258 attr->event_type = event_type;
259 attr->sub_event_type = sub_event_type;
260 attr->sched_type = sched_type;
264 ev->flow_id = flow_id;
265 ev->sub_event_type = sub_event_type;
266 ev->event_type = event_type;
267 /* Inject the new event */
268 ev->op = RTE_EVENT_OP_NEW;
269 ev->sched_type = sched_type;
270 ev->queue_id = queue;
275 inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
276 uint8_t sched_type, uint8_t queue, uint8_t port,
282 for (i = 0; i < events; i++) {
283 struct rte_event ev = {.event = 0, .u64 = 0};
285 m = rte_pktmbuf_alloc(eventdev_test_mempool);
286 TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
289 update_event_and_validation_attr(m, &ev, flow_id, event_type,
290 sub_event_type, sched_type, queue, port);
291 rte_event_enqueue_burst(evdev, port, &ev, 1);
297 check_excess_events(uint8_t port)
300 uint16_t valid_event;
/* Check for excess events; try a few times and then exit */
304 for (i = 0; i < 32; i++) {
305 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
307 TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
314 generate_random_events(const unsigned int total_events)
316 struct rte_event_dev_info info;
320 uint32_t queue_count;
321 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
322 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
323 &queue_count), "Queue count get failed");
325 ret = rte_event_dev_info_get(evdev, &info);
326 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
327 for (i = 0; i < total_events; i++) {
329 rte_rand() % info.max_event_queue_flows /*flow_id */,
330 RTE_EVENT_TYPE_CPU /* event_type */,
331 rte_rand() % 256 /* sub_event_type */,
332 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
333 rte_rand() % queue_count /* queue */,
344 validate_event(struct rte_event *ev)
346 struct event_attr *attr;
348 attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
349 TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
350 "flow_id mismatch enq=%d deq =%d",
351 attr->flow_id, ev->flow_id);
352 TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
353 "event_type mismatch enq=%d deq =%d",
354 attr->event_type, ev->event_type);
355 TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
356 "sub_event_type mismatch enq=%d deq =%d",
357 attr->sub_event_type, ev->sub_event_type);
358 TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
359 "sched_type mismatch enq=%d deq =%d",
360 attr->sched_type, ev->sched_type);
361 TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
362 "queue mismatch enq=%d deq =%d",
363 attr->queue, ev->queue_id);
367 typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
368 struct rte_event *ev);
371 consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
374 uint16_t valid_event;
375 uint32_t events = 0, forward_progress_cnt = 0, index = 0;
379 if (++forward_progress_cnt > UINT16_MAX) {
380 printf("Detected deadlock\n");
384 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
388 forward_progress_cnt = 0;
389 ret = validate_event(&ev);
394 ret = fn(index, port, &ev);
395 TEST_ASSERT_SUCCESS(ret,
396 "Failed to validate test specific event");
401 rte_pktmbuf_free(ev.mbuf);
402 if (++events >= total_events)
406 return check_excess_events(port);
410 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
413 TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
419 test_simple_enqdeq(uint8_t sched_type)
423 ret = inject_events(0 /*flow_id */,
424 RTE_EVENT_TYPE_CPU /* event_type */,
425 0 /* sub_event_type */,
433 return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
437 test_simple_enqdeq_ordered(void)
439 return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
443 test_simple_enqdeq_atomic(void)
445 return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
449 test_simple_enqdeq_parallel(void)
451 return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
455 * Generate a prescribed number of events and spread them across available
* queues. On dequeue, verify the enqueued events using a single event port (port 0).
460 test_multi_queue_enq_single_port_deq(void)
464 ret = generate_random_events(MAX_EVENTS);
468 return consume_events(0 /* port */, MAX_EVENTS, NULL);
* Inject 0..MAX_EVENTS events over queues 0..queue_count-1, using the event
* index modulo queue_count as the queue number.
* For example, inject 32 events over queues 0..7:
* enqueue events 0, 8, 16, 24 in queue 0
* enqueue events 1, 9, 17, 25 in queue 1
* ...
* enqueue events 7, 15, 23, 31 in queue 7
* On dequeue, validate that the events come in
* 0,8,16,24,1,9,17,25,...,7,15,23,31 order, from queue 0 (highest priority)
* to queue 7 (lowest priority).
486 validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
488 uint32_t queue_count;
489 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
490 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
491 &queue_count), "Queue count get failed");
492 uint32_t range = MAX_EVENTS / queue_count;
493 uint32_t expected_val = (index % range) * queue_count;
495 expected_val += ev->queue_id;
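/*
* Events were enqueued round-robin (queue = i % queue_count) with seqn = i,
* and queues drain strictly from highest to lowest priority, so the event at
* dequeue position 'index' must carry seqn = (index % range) * queue_count +
* queue_id.
*/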
497 TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
498 "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
499 ev->mbuf->seqn, index, expected_val, range,
500 queue_count, MAX_EVENTS);
505 test_multi_queue_priority(void)
509 int i, max_evts_roundoff;
/* See validate_queue_priority() comments for the priority validation logic */
512 uint32_t queue_count;
513 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
514 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
515 &queue_count), "Queue count get failed");
516 max_evts_roundoff = MAX_EVENTS / queue_count;
517 max_evts_roundoff *= queue_count;
519 for (i = 0; i < max_evts_roundoff; i++) {
520 struct rte_event ev = {.event = 0, .u64 = 0};
522 m = rte_pktmbuf_alloc(eventdev_test_mempool);
523 TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
526 queue = i % queue_count;
527 update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
528 0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
529 rte_event_enqueue_burst(evdev, 0, &ev, 1);
532 return consume_events(0, max_evts_roundoff, validate_queue_priority);
536 worker_multi_port_fn(void *arg)
538 struct test_core_param *param = arg;
540 uint16_t valid_event;
541 uint8_t port = param->port;
542 rte_atomic32_t *total_events = param->total_events;
545 while (rte_atomic32_read(total_events) > 0) {
546 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
550 ret = validate_event(&ev);
551 TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
552 rte_pktmbuf_free(ev.mbuf);
553 rte_atomic32_sub(total_events, 1);
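/*
* Poll the given worker lcore until it reports FINISHED, printing progress
* once per second and dumping the device state if the worker has not finished
* within roughly ten seconds (treated as a deadlock).
*/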
559 wait_workers_to_join(int lcore, const rte_atomic32_t *count)
561 uint64_t cycles, print_cycles;
563 print_cycles = cycles = rte_get_timer_cycles();
564 while (rte_eal_get_lcore_state(lcore) != FINISHED) {
565 uint64_t new_cycles = rte_get_timer_cycles();
567 if (new_cycles - print_cycles > rte_get_timer_hz()) {
568 printf("\r%s: events %d\n", __func__,
569 rte_atomic32_read(count));
570 print_cycles = new_cycles;
572 if (new_cycles - cycles > rte_get_timer_hz() * 10) {
573 printf("%s: No schedules for seconds, deadlock (%d)\n",
575 rte_atomic32_read(count));
576 rte_event_dev_dump(evdev, stdout);
581 rte_eal_mp_wait_lcore();
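/*
* Launch master_worker on the first available worker lcore (event port 0) and
* slave_workers on the remaining lcores (ports 1..nb_workers-1), then wait
* for the workers to join.
*/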
587 launch_workers_and_wait(int (*master_worker)(void *),
588 int (*slave_workers)(void *), uint32_t total_events,
589 uint8_t nb_workers, uint8_t sched_type)
594 struct test_core_param *param;
595 rte_atomic32_t atomic_total_events;
596 uint64_t dequeue_tmo_ticks;
601 rte_atomic32_set(&atomic_total_events, total_events);
604 param = malloc(sizeof(struct test_core_param) * nb_workers);
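/*
* Convert a random timeout of up to 10 ms (expressed in nanoseconds) into
* device-specific tick units; the ticks are handed to every worker as the
* per-dequeue timeout.
*/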
608 ret = rte_event_dequeue_timeout_ticks(evdev,
609 rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
613 param[0].total_events = &atomic_total_events;
614 param[0].sched_type = sched_type;
616 param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
619 w_lcore = rte_get_next_lcore(
623 rte_eal_remote_launch(master_worker, ¶m[0], w_lcore);
625 for (port = 1; port < nb_workers; port++) {
626 param[port].total_events = &atomic_total_events;
627 param[port].sched_type = sched_type;
628 param[port].port = port;
629 param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
631 w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
632 rte_eal_remote_launch(slave_workers, ¶m[port], w_lcore);
635 ret = wait_workers_to_join(w_lcore, &atomic_total_events);
641 * Generate a prescribed number of events and spread them across available
* queues. Dequeue the events through multiple ports and verify the enqueued events.
646 test_multi_queue_enq_multi_port_deq(void)
648 const unsigned int total_events = MAX_EVENTS;
652 ret = generate_random_events(total_events);
656 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
657 RTE_EVENT_DEV_ATTR_PORT_COUNT,
658 &nr_ports), "Port count get failed");
659 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
662 printf("%s: Not enough ports=%d or workers=%d\n", __func__,
663 nr_ports, rte_lcore_count() - 1);
667 return launch_workers_and_wait(worker_multi_port_fn,
668 worker_multi_port_fn, total_events,
669 nr_ports, 0xff /* invalid */);
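/*
* Queue x was linked to port x, so a correctly linked device must deliver on
* each port only events whose queue_id equals that port.
*/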
673 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
674 struct rte_event *ev)
677 TEST_ASSERT_EQUAL(port, ev->queue_id,
678 "queue mismatch enq=%d deq =%d",
* Link queue x to port x and check the correctness of the link by verifying
* that queue_id == x on dequeue from that specific port x
688 test_queue_to_port_single_link(void)
690 int i, nr_links, ret;
693 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
694 RTE_EVENT_DEV_ATTR_PORT_COUNT,
695 &port_count), "Port count get failed");
/* Unlink all connections that were created in eventdev_setup() */
698 for (i = 0; i < (int)port_count; i++) {
699 ret = rte_event_port_unlink(evdev, i, NULL, 0);
700 TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
703 uint32_t queue_count;
704 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
705 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
706 &queue_count), "Queue count get failed");
708 nr_links = RTE_MIN(port_count, queue_count);
709 const unsigned int total_events = MAX_EVENTS / nr_links;
711 /* Link queue x to port x and inject events to queue x through port x */
712 for (i = 0; i < nr_links; i++) {
713 uint8_t queue = (uint8_t)i;
715 ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
716 TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
720 RTE_EVENT_TYPE_CPU /* event_type */,
721 rte_rand() % 256 /* sub_event_type */,
722 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
725 total_events /* events */);
/* Verify that the events were generated from the correct queue */
731 for (i = 0; i < nr_links; i++) {
732 ret = consume_events(i /* port */, total_events,
733 validate_queue_to_port_single_link);
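/*
* Even-numbered queues were linked to port 0 and odd-numbered queues to
* port 1, so the low bit of the dequeued queue_id must equal the port.
*/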
742 validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
743 struct rte_event *ev)
746 TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
747 "queue mismatch enq=%d deq =%d",
* Link all even-numbered queues to port 0 and all odd-numbered queues to
* port 1, and verify the link connections on dequeue
757 test_queue_to_port_multi_link(void)
759 int ret, port0_events = 0, port1_events = 0;
761 uint32_t nr_queues = 0;
762 uint32_t nr_ports = 0;
764 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
765 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
766 &nr_queues), "Queue count get failed");
771 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
772 RTE_EVENT_DEV_ATTR_PORT_COUNT,
773 &nr_ports), "Port count get failed");
776 printf("%s: Not enough ports to test ports=%d\n",
/* Unlink all connections that were created in eventdev_setup() */
782 for (port = 0; port < nr_ports; port++) {
783 ret = rte_event_port_unlink(evdev, port, NULL, 0);
784 TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
788 const unsigned int total_events = MAX_EVENTS / nr_queues;
/* Link all even-numbered queues to port 0 and all odd-numbered queues to port 1 */
791 for (queue = 0; queue < nr_queues; queue++) {
793 ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
794 TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
799 RTE_EVENT_TYPE_CPU /* event_type */,
800 rte_rand() % 256 /* sub_event_type */,
801 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
804 total_events /* events */);
809 port0_events += total_events;
811 port1_events += total_events;
814 ret = consume_events(0 /* port */, port0_events,
815 validate_queue_to_port_multi_link);
818 ret = consume_events(1 /* port */, port1_events,
819 validate_queue_to_port_multi_link);
827 worker_flow_based_pipeline(void *arg)
829 struct test_core_param *param = arg;
831 uint16_t valid_event;
832 uint8_t port = param->port;
833 uint8_t new_sched_type = param->sched_type;
834 rte_atomic32_t *total_events = param->total_events;
835 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
837 while (rte_atomic32_read(total_events) > 0) {
838 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
843 /* Events from stage 0 */
844 if (ev.sub_event_type == 0) {
845 /* Move to atomic flow to maintain the ordering */
847 ev.event_type = RTE_EVENT_TYPE_CPU;
848 ev.sub_event_type = 1; /* stage 1 */
849 ev.sched_type = new_sched_type;
850 ev.op = RTE_EVENT_OP_FORWARD;
851 rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
853 if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
854 rte_pktmbuf_free(ev.mbuf);
855 rte_atomic32_sub(total_events, 1);
857 printf("Failed to update seqn_list\n");
861 printf("Invalid ev.sub_event_type = %d\n",
870 test_multiport_flow_sched_type_test(uint8_t in_sched_type,
871 uint8_t out_sched_type)
873 const unsigned int total_events = MAX_EVENTS;
877 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
878 RTE_EVENT_DEV_ATTR_PORT_COUNT,
879 &nr_ports), "Port count get failed");
880 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
883 printf("%s: Not enough ports=%d or workers=%d\n", __func__,
884 nr_ports, rte_lcore_count() - 1);
/* Inject events with m->seqn ranging from 0 to total_events - 1 */
891 RTE_EVENT_TYPE_CPU /* event_type */,
892 0 /* sub_event_type (stage 0) */,
896 total_events /* events */);
900 ret = launch_workers_and_wait(worker_flow_based_pipeline,
901 worker_flow_based_pipeline,
902 total_events, nr_ports, out_sched_type);
906 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
907 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
/* Check whether the event order was maintained */
909 return seqn_list_check(total_events);
915 /* Multi port ordered to atomic transaction */
917 test_multi_port_flow_ordered_to_atomic(void)
919 /* Ingress event order test */
920 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
921 RTE_SCHED_TYPE_ATOMIC);
925 test_multi_port_flow_ordered_to_ordered(void)
927 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
928 RTE_SCHED_TYPE_ORDERED);
932 test_multi_port_flow_ordered_to_parallel(void)
934 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
935 RTE_SCHED_TYPE_PARALLEL);
939 test_multi_port_flow_atomic_to_atomic(void)
941 /* Ingress event order test */
942 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
943 RTE_SCHED_TYPE_ATOMIC);
947 test_multi_port_flow_atomic_to_ordered(void)
949 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
950 RTE_SCHED_TYPE_ORDERED);
954 test_multi_port_flow_atomic_to_parallel(void)
956 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
957 RTE_SCHED_TYPE_PARALLEL);
961 test_multi_port_flow_parallel_to_atomic(void)
963 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
964 RTE_SCHED_TYPE_ATOMIC);
968 test_multi_port_flow_parallel_to_ordered(void)
970 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
971 RTE_SCHED_TYPE_ORDERED);
975 test_multi_port_flow_parallel_to_parallel(void)
977 return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
978 RTE_SCHED_TYPE_PARALLEL);
982 worker_group_based_pipeline(void *arg)
984 struct test_core_param *param = arg;
986 uint16_t valid_event;
987 uint8_t port = param->port;
988 uint8_t new_sched_type = param->sched_type;
989 rte_atomic32_t *total_events = param->total_events;
990 uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
992 while (rte_atomic32_read(total_events) > 0) {
993 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
/* Events from stage 0 (group 0) */
999 if (ev.queue_id == 0) {
1000 /* Move to atomic flow to maintain the ordering */
1002 ev.event_type = RTE_EVENT_TYPE_CPU;
1003 ev.sched_type = new_sched_type;
1004 ev.queue_id = 1; /* Stage 1*/
1005 ev.op = RTE_EVENT_OP_FORWARD;
1006 rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
1008 if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
1009 rte_pktmbuf_free(ev.mbuf);
1010 rte_atomic32_sub(total_events, 1);
1012 printf("Failed to update seqn_list\n");
1016 printf("Invalid ev.queue_id = %d\n", ev.queue_id);
1026 test_multiport_queue_sched_type_test(uint8_t in_sched_type,
1027 uint8_t out_sched_type)
1029 const unsigned int total_events = MAX_EVENTS;
1033 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1034 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1035 &nr_ports), "Port count get failed");
1037 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1039 uint32_t queue_count;
1040 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1041 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1042 &queue_count), "Queue count get failed");
1043 if (queue_count < 2 || !nr_ports) {
1044 printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
1045 __func__, queue_count, nr_ports,
1046 rte_lcore_count() - 1);
1047 return TEST_SUCCESS;
/* Inject events with m->seqn ranging from 0 to total_events - 1 */
1051 ret = inject_events(
1053 RTE_EVENT_TYPE_CPU /* event_type */,
1054 0 /* sub_event_type (stage 0) */,
1058 total_events /* events */);
1062 ret = launch_workers_and_wait(worker_group_based_pipeline,
1063 worker_group_based_pipeline,
1064 total_events, nr_ports, out_sched_type);
1068 if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
1069 out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
/* Check whether the event order was maintained */
1071 return seqn_list_check(total_events);
1073 return TEST_SUCCESS;
1077 test_multi_port_queue_ordered_to_atomic(void)
1079 /* Ingress event order test */
1080 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1081 RTE_SCHED_TYPE_ATOMIC);
1085 test_multi_port_queue_ordered_to_ordered(void)
1087 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1088 RTE_SCHED_TYPE_ORDERED);
1092 test_multi_port_queue_ordered_to_parallel(void)
1094 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
1095 RTE_SCHED_TYPE_PARALLEL);
1099 test_multi_port_queue_atomic_to_atomic(void)
1101 /* Ingress event order test */
1102 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1103 RTE_SCHED_TYPE_ATOMIC);
1107 test_multi_port_queue_atomic_to_ordered(void)
1109 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1110 RTE_SCHED_TYPE_ORDERED);
1114 test_multi_port_queue_atomic_to_parallel(void)
1116 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
1117 RTE_SCHED_TYPE_PARALLEL);
1121 test_multi_port_queue_parallel_to_atomic(void)
1123 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1124 RTE_SCHED_TYPE_ATOMIC);
1128 test_multi_port_queue_parallel_to_ordered(void)
1130 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1131 RTE_SCHED_TYPE_ORDERED);
1135 test_multi_port_queue_parallel_to_parallel(void)
1137 return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
1138 RTE_SCHED_TYPE_PARALLEL);
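/*
* Worker for the flow based max-stages test: sub_event_type acts as the stage
* index. Events are forwarded through stages 0..255 with a freshly randomized
* sched type on every hop and are freed once they reach stage 255.
*/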
1142 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
1144 struct test_core_param *param = arg;
1145 struct rte_event ev;
1146 uint16_t valid_event;
1147 uint8_t port = param->port;
1148 rte_atomic32_t *total_events = param->total_events;
1150 while (rte_atomic32_read(total_events) > 0) {
1151 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1155 if (ev.sub_event_type == 255) { /* last stage */
1156 rte_pktmbuf_free(ev.mbuf);
1157 rte_atomic32_sub(total_events, 1);
1159 ev.event_type = RTE_EVENT_TYPE_CPU;
1160 ev.sub_event_type++;
1162 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1163 ev.op = RTE_EVENT_OP_FORWARD;
1164 rte_event_enqueue_burst(evdev, port, &ev, 1);
1171 launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
1176 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1177 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1178 &nr_ports), "Port count get failed");
1179 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1182 printf("%s: Not enough ports=%d or workers=%d\n", __func__,
1183 nr_ports, rte_lcore_count() - 1);
1184 return TEST_SUCCESS;
/* Inject events with m->seqn ranging from 0 to MAX_EVENTS - 1 */
1188 ret = inject_events(
1190 RTE_EVENT_TYPE_CPU /* event_type */,
1191 0 /* sub_event_type (stage 0) */,
1192 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
1195 MAX_EVENTS /* events */);
1199 return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
1200 0xff /* invalid */);
/* Flow based pipeline with maximum stages and random sched type */
1205 test_multi_port_flow_max_stages_random_sched_type(void)
1207 return launch_multi_port_max_stages_random_sched_type(
1208 worker_flow_based_pipeline_max_stages_rand_sched_type);
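/*
* Worker for the queue based max-stages test: queue_id acts as the stage
* index, and events reaching the last queue are treated as the final stage
* and freed.
*/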
1212 worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
1214 struct test_core_param *param = arg;
1215 struct rte_event ev;
1216 uint16_t valid_event;
1217 uint8_t port = param->port;
1218 uint32_t queue_count;
1219 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1220 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1221 &queue_count), "Queue count get failed");
1222 uint8_t nr_queues = queue_count;
1223 rte_atomic32_t *total_events = param->total_events;
1225 while (rte_atomic32_read(total_events) > 0) {
1226 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1230 if (ev.queue_id == nr_queues - 1) { /* last stage */
1231 rte_pktmbuf_free(ev.mbuf);
1232 rte_atomic32_sub(total_events, 1);
1234 ev.event_type = RTE_EVENT_TYPE_CPU;
1237 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1238 ev.op = RTE_EVENT_OP_FORWARD;
1239 rte_event_enqueue_burst(evdev, port, &ev, 1);
/* Queue based pipeline with maximum stages and random sched type */
1247 test_multi_port_queue_max_stages_random_sched_type(void)
1249 return launch_multi_port_max_stages_random_sched_type(
1250 worker_queue_based_pipeline_max_stages_rand_sched_type);
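/*
* Worker for the mixed max-stages test: queue_id still tracks the stage,
* while sub_event_type and sched_type are re-randomized on every forward.
*/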
1254 worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
1256 struct test_core_param *param = arg;
1257 struct rte_event ev;
1258 uint16_t valid_event;
1259 uint8_t port = param->port;
1260 uint32_t queue_count;
1261 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1262 RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1263 &queue_count), "Queue count get failed");
1264 uint8_t nr_queues = queue_count;
1265 rte_atomic32_t *total_events = param->total_events;
1267 while (rte_atomic32_read(total_events) > 0) {
1268 valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
1272 if (ev.queue_id == nr_queues - 1) { /* Last stage */
1273 rte_pktmbuf_free(ev.mbuf);
1274 rte_atomic32_sub(total_events, 1);
1276 ev.event_type = RTE_EVENT_TYPE_CPU;
1278 ev.sub_event_type = rte_rand() % 256;
1280 rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
1281 ev.op = RTE_EVENT_OP_FORWARD;
1282 rte_event_enqueue_burst(evdev, port, &ev, 1);
/* Queue and flow based pipeline with maximum stages and random sched type */
1290 test_multi_port_mixed_max_stages_random_sched_type(void)
1292 return launch_multi_port_max_stages_random_sched_type(
1293 worker_mixed_pipeline_max_stages_rand_sched_type);
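/*
* Producer worker: allocates NUM_PACKETS mbufs on a single ordered flow and
* tags each with an increasing seqn so that the consumer side can verify
* ingress ordering through seqn_list_check().
*/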
1297 worker_ordered_flow_producer(void *arg)
1299 struct test_core_param *param = arg;
1300 uint8_t port = param->port;
1304 while (counter < NUM_PACKETS) {
1305 m = rte_pktmbuf_alloc(eventdev_test_mempool);
1309 m->seqn = counter++;
1311 struct rte_event ev = {.event = 0, .u64 = 0};
1313 ev.flow_id = 0x1; /* Generate a fat flow */
1314 ev.sub_event_type = 0;
1315 /* Inject the new event */
1316 ev.op = RTE_EVENT_OP_NEW;
1317 ev.event_type = RTE_EVENT_TYPE_CPU;
1318 ev.sched_type = RTE_SCHED_TYPE_ORDERED;
1321 rte_event_enqueue_burst(evdev, port, &ev, 1);
1328 test_producer_consumer_ingress_order_test(int (*fn)(void *))
1332 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
1333 RTE_EVENT_DEV_ATTR_PORT_COUNT,
1334 &nr_ports), "Port count get failed");
1335 nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
1337 if (rte_lcore_count() < 3 || nr_ports < 2) {
1338 printf("### Not enough cores for %s test.\n", __func__);
1339 return TEST_SUCCESS;
1342 launch_workers_and_wait(worker_ordered_flow_producer, fn,
1343 NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
/* Check whether the event order was maintained */
1345 return seqn_list_check(NUM_PACKETS);
1348 /* Flow based producer consumer ingress order test */
1350 test_flow_producer_consumer_ingress_order_test(void)
1352 return test_producer_consumer_ingress_order_test(
1353 worker_flow_based_pipeline);
1356 /* Queue based producer consumer ingress order test */
1358 test_queue_producer_consumer_ingress_order_test(void)
1360 return test_producer_consumer_ingress_order_test(
1361 worker_group_based_pipeline);
1364 static struct unit_test_suite eventdev_octeontx_testsuite = {
1365 .suite_name = "eventdev octeontx unit test suite",
1366 .setup = testsuite_setup,
1367 .teardown = testsuite_teardown,
1368 .unit_test_cases = {
1369 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1370 test_simple_enqdeq_ordered),
1371 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1372 test_simple_enqdeq_atomic),
1373 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1374 test_simple_enqdeq_parallel),
1375 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1376 test_multi_queue_enq_single_port_deq),
1377 TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
1378 test_multi_queue_priority),
1379 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1380 test_multi_queue_enq_multi_port_deq),
1381 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1382 test_queue_to_port_single_link),
1383 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1384 test_queue_to_port_multi_link),
1385 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1386 test_multi_port_flow_ordered_to_atomic),
1387 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1388 test_multi_port_flow_ordered_to_ordered),
1389 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1390 test_multi_port_flow_ordered_to_parallel),
1391 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1392 test_multi_port_flow_atomic_to_atomic),
1393 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1394 test_multi_port_flow_atomic_to_ordered),
1395 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1396 test_multi_port_flow_atomic_to_parallel),
1397 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1398 test_multi_port_flow_parallel_to_atomic),
1399 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1400 test_multi_port_flow_parallel_to_ordered),
1401 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1402 test_multi_port_flow_parallel_to_parallel),
1403 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1404 test_multi_port_queue_ordered_to_atomic),
1405 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1406 test_multi_port_queue_ordered_to_ordered),
1407 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1408 test_multi_port_queue_ordered_to_parallel),
1409 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1410 test_multi_port_queue_atomic_to_atomic),
1411 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1412 test_multi_port_queue_atomic_to_ordered),
1413 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1414 test_multi_port_queue_atomic_to_parallel),
1415 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1416 test_multi_port_queue_parallel_to_atomic),
1417 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1418 test_multi_port_queue_parallel_to_ordered),
1419 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1420 test_multi_port_queue_parallel_to_parallel),
1421 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1422 test_multi_port_flow_max_stages_random_sched_type),
1423 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1424 test_multi_port_queue_max_stages_random_sched_type),
1425 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1426 test_multi_port_mixed_max_stages_random_sched_type),
1427 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1428 test_flow_producer_consumer_ingress_order_test),
1429 TEST_CASE_ST(eventdev_setup, eventdev_teardown,
1430 test_queue_producer_consumer_ingress_order_test),
1431 /* Tests with dequeue timeout */
1432 TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
1433 test_multi_port_flow_ordered_to_atomic),
1434 TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
1435 test_multi_port_queue_ordered_to_atomic),
1436 TEST_CASES_END() /**< NULL terminate unit test array */
1441 test_eventdev_octeontx(void)
1443 return unit_test_suite_runner(&eventdev_octeontx_testsuite);
1446 REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);