/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium networks. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_vdev.h> /* assumed here for rte_vdev_init(); <rte_bus_vdev.h> on newer DPDK */

#include "test.h"
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  (16 * 1024)

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];
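
/*
 * seqn_list records the mbuf sequence numbers observed at the final pipeline
 * stage; seqn_list_check() later verifies that they arrived in 0..limit-1
 * order.
 */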
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));

seqn_list_update(int val)

	if (seqn_list_index >= NUM_PACKETS)

	seqn_list[seqn_list_index++] = val;

seqn_list_check(int limit)

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			printf("Seqn mismatch %d %d\n", seqn_list[i], i);

struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};
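
/*
 * Probe the octeontx event device; if it has not been created yet, create it
 * as a virtual device so the suite can run without EAL vdev arguments.
 */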
testsuite_setup(void)

	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);

		printf("%d: Eventdev %s not found - creating.\n",
			__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			printf("Error creating eventdev %s\n", eventdev_name);

		evdev = rte_event_dev_get_dev_id(eventdev_name);

			printf("Error finding newly created eventdev\n");

testsuite_teardown(void)

	rte_event_dev_close(evdev);
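
/*
 * Build a device configuration that simply requests the maximum resources
 * (queues, ports, flows, depths, inflight events) advertised by the driver.
 */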
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)

	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;

	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
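
/*
 * Common device setup. Depending on the mode, queues are configured with
 * default settings, with a unique priority per queue, or with per-dequeue
 * timeouts enabled; every port is then linked to all queues.
 */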
_eventdev_setup(int mode)

	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy the pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,

			0 /*MBUF_CACHE_SIZE*/,

			512, /* Use very small mbufs */

	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (rte_event_queue_count(evdev) > 8) {
			printf("test expects a unique priority per queue\n");

		/* Configure event queues (0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				rte_event_queue_count(evdev);
		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,

			TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);

		/* Configure event queues with default priority */
		for (i = 0; i < rte_event_queue_count(evdev); i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);

	/* Configure event ports */
	for (i = 0; i < rte_event_port_count(evdev); i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);

eventdev_setup_priority(void)

	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);

eventdev_setup_dequeue_timeout(void)

	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);

eventdev_teardown(void)

	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
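
/*
 * Every injected mbuf carries a copy of its event attributes (struct
 * event_attr) in the packet data area so that validate_event() can
 * cross-check them against the event fields seen on dequeue.
 */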
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)

	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;

inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		update_event_and_validation_attr(m, &ev, flow_id, event_type,
				sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
check_excess_events(uint8_t port)

	uint16_t valid_event;

	/* Check for excess events, try a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",

generate_random_events(const unsigned int total_events)

	struct rte_event_dev_info info;

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {

			rte_rand() % info.max_event_queue_flows /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % rte_event_queue_count(evdev) /* queue */,

validate_event(struct rte_event *ev)

	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				struct rte_event *ev);
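
/*
 * Dequeue total_events events from the given port, validating each one and
 * optionally running a test-specific callback. A zero-dequeue streak longer
 * than UINT16_MAX iterations is treated as a deadlock.
 */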
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)

	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;

		if (++forward_progress_cnt > UINT16_MAX) {
			printf("Detected deadlock\n");

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		forward_progress_cnt = 0;
		ret = validate_event(&ev);

			ret = fn(index, port, &ev);
			TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)

	return check_excess_events(port);

validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)

	TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
test_simple_enqdeq(uint8_t sched_type)

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);

test_simple_enqdeq_ordered(void)

	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);

test_simple_enqdeq_atomic(void)

	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);

test_simple_enqdeq_parallel(void)

	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);

/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify that the
 * enqueued events are received.
 */

test_multi_queue_enq_single_port_deq(void)

	ret = generate_random_events(MAX_EVENTS);

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
/*
 * Inject 0..MAX_EVENTS events over queues 0..rte_event_queue_count()-1,
 * assigning event i to queue i modulo the queue count.
 * For example, inject 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events arrive in
 * 0,8,16,24,1,9,17,25,..,7,15,23,31 order, from queue 0 (highest priority)
 * to queue 7 (lowest priority).
 */

validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)

	uint32_t range = MAX_EVENTS / rte_event_queue_count(evdev);
	uint32_t expected_val = (index % range) * rte_event_queue_count(evdev);

	expected_val += ev->queue_id;
	TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
			"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
			ev->mbuf->seqn, index, expected_val, range,
			rte_event_queue_count(evdev), MAX_EVENTS);

test_multi_queue_priority(void)

	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for priority validation logic */
	max_evts_roundoff = MAX_EVENTS / rte_event_queue_count(evdev);
	max_evts_roundoff *= rte_event_queue_count(evdev);

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		queue = i % rte_event_queue_count(evdev);
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
				0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
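
/*
 * Generic worker: keep dequeuing from its own port, validate and release each
 * event until the shared total_events counter drops to zero.
 */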
worker_multi_port_fn(void *arg)

	struct test_core_param *param = arg;

	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		ret = validate_event(&ev);
		TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);

wait_workers_to_join(int lcore, const rte_atomic32_t *count)

	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			printf("\r%s: events %d\n", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;

		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			printf("%s: No schedules for 10 seconds, deadlock (%d)\n",

				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);

	rte_eal_mp_wait_lcore();
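
/*
 * Launch the master worker on the first available worker lcore and the slave
 * workers on the remaining lcores, one event port per worker, then wait for
 * the run to finish via wait_workers_to_join().
 */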
launch_workers_and_wait(int (*master_worker)(void *),
		int (*slave_workers)(void *), uint32_t total_events,
		uint8_t nb_workers, uint8_t sched_type)

	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	rte_atomic32_set(&atomic_total_events, total_events);

	param = malloc(sizeof(struct test_core_param) * nb_workers);

	ret = rte_event_dequeue_timeout_ticks(evdev,
			rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;

	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;

	w_lcore = rte_get_next_lcore(

	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;

		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify that the
 * enqueued events are received.
 */

test_multi_queue_enq_multi_port_deq(void)

	const unsigned int total_events = MAX_EVENTS;

	ret = generate_random_events(total_events);

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);

	return launch_workers_and_wait(worker_multi_port_fn,
			worker_multi_port_fn, total_events,
			nr_ports, 0xff /* invalid */);
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)

	TEST_ASSERT_EQUAL(port, ev->queue_id,
			"queue mismatch enq=%d deq=%d",

/*
 * Link queue x to port x and check the correctness of the link by checking
 * that queue_id == x on dequeue on that specific port x.
 */

test_queue_to_port_single_link(void)

	int i, nr_links, ret;

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < rte_event_port_count(evdev); i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);

	nr_links = RTE_MIN(rte_event_port_count(evdev),
			rte_event_queue_count(evdev));
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),

			total_events /* events */);

	/* Verify that the events were generated from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)

	TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
			"queue mismatch enq=%d deq=%d",

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, and verify the link connections on dequeue.
 */

test_queue_to_port_multi_link(void)

	int ret, port0_events = 0, port1_events = 0;
	uint8_t nr_queues, nr_ports, queue, port;

	nr_queues = rte_event_queue_count(evdev);
	nr_ports = rte_event_port_count(evdev);

		printf("%s: Not enough ports to test ports=%d\n",

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {

		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",

			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),

			total_events /* events */);

			port0_events += total_events;

			port1_events += total_events;

	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);

	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
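
/*
 * Two-stage pipeline keyed on sub_event_type: stage 0 events are forwarded to
 * stage 1 with the requested sched_type; stage 1 events are recorded in
 * seqn_list and released.
 */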
worker_flow_based_pipeline(void *arg)

	struct test_core_param *param = arg;

	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */

			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);

				printf("Failed to update seqn_list\n");

			printf("Invalid ev.sub_event_type = %d\n",
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)

	const unsigned int total_events = MAX_EVENTS;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);

	/* Inject events with m->seqn = 0..total_events-1 */

			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type (stage 0) */,

			total_events /* events */);

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
			worker_flow_based_pipeline,
			total_events, nr_ports, out_sched_type);

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);

/* Multi port ordered to atomic transaction */

test_multi_port_flow_ordered_to_atomic(void)

	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ATOMIC);
test_multi_port_flow_ordered_to_ordered(void)

	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ORDERED);

test_multi_port_flow_ordered_to_parallel(void)

	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_PARALLEL);

test_multi_port_flow_atomic_to_atomic(void)

	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ATOMIC);

test_multi_port_flow_atomic_to_ordered(void)

	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ORDERED);

test_multi_port_flow_atomic_to_parallel(void)

	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_PARALLEL);

test_multi_port_flow_parallel_to_atomic(void)

	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ATOMIC);

test_multi_port_flow_parallel_to_ordered(void)

	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ORDERED);

test_multi_port_flow_parallel_to_parallel(void)

	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_PARALLEL);
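
/*
 * Two-stage pipeline keyed on queue_id (one queue per stage): queue 0 events
 * are forwarded to queue 1 with the requested sched_type; queue 1 events are
 * recorded in seqn_list and released.
 */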
worker_group_based_pipeline(void *arg)

	struct test_core_param *param = arg;

	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,

		/* Events from stage 0 (group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */

			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);

				printf("Failed to update seqn_list\n");

			printf("Invalid ev.queue_id = %d\n", ev.queue_id);
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)

	const unsigned int total_events = MAX_EVENTS;

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (rte_event_queue_count(evdev) < 2 || !nr_ports) {
		printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
			__func__, rte_event_queue_count(evdev),
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;

	/* Inject events with m->seqn = 0..total_events-1 */
	ret = inject_events(

			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type (stage 0) */,

			total_events /* events */);

	ret = launch_workers_and_wait(worker_group_based_pipeline,
			worker_group_based_pipeline,
			total_events, nr_ports, out_sched_type);

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);

	return TEST_SUCCESS;
test_multi_port_queue_ordered_to_atomic(void)

	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ATOMIC);

test_multi_port_queue_ordered_to_ordered(void)

	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ORDERED);

test_multi_port_queue_ordered_to_parallel(void)

	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_PARALLEL);

test_multi_port_queue_atomic_to_atomic(void)

	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ATOMIC);

test_multi_port_queue_atomic_to_ordered(void)

	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ORDERED);

test_multi_port_queue_atomic_to_parallel(void)

	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_PARALLEL);

test_multi_port_queue_parallel_to_atomic(void)

	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ATOMIC);

test_multi_port_queue_parallel_to_ordered(void)

	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ORDERED);

test_multi_port_queue_parallel_to_parallel(void)

	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_PARALLEL);
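
/*
 * Max-stage pipeline keyed on sub_event_type: each event walks through 256
 * stages by incrementing sub_event_type with a random sched_type at every
 * hop, and is released once sub_event_type reaches 255.
 */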
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)

	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);

			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;

				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);

launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			rte_event_port_count(evdev), rte_lcore_count() - 1);
		return TEST_SUCCESS;

	/* Inject events with m->seqn = 0..MAX_EVENTS-1 */
	ret = inject_events(

			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type (stage 0) */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,

			MAX_EVENTS /* events */);

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
			0xff /* invalid */);

/* Flow based pipeline with maximum stages with random sched type */

test_multi_port_flow_max_stages_random_sched_type(void)

	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)

	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t nr_queues = rte_event_queue_count(evdev);
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);

			ev.event_type = RTE_EVENT_TYPE_CPU;

				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);

/* Queue based pipeline with maximum stages with random sched type */

test_multi_port_queue_max_stages_random_sched_type(void)

	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);

worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)

	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t nr_queues = rte_event_queue_count(evdev);
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);

			ev.event_type = RTE_EVENT_TYPE_CPU;

			ev.sub_event_type = rte_rand() % 256;

				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);

/* Queue and flow based pipeline with maximum stages with random sched type */

test_multi_port_mixed_max_stages_random_sched_type(void)

	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
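
/*
 * Producer worker for the ingress-order test: enqueue NUM_PACKETS new events
 * on a single ordered flow with sequential mbuf sequence numbers.
 */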
worker_ordered_flow_producer(void *arg)

	struct test_core_param *param = arg;
	uint8_t port = param->port;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;

		rte_event_enqueue_burst(evdev, port, &ev, 1);

test_producer_consumer_ingress_order_test(int (*fn)(void *))

	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		printf("### Not enough cores for %s test.\n", __func__);
		return TEST_SUCCESS;

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the event order was maintained */
	return seqn_list_check(NUM_PACKETS);

/* Flow based producer consumer ingress order test */

test_flow_producer_consumer_ingress_order_test(void)

	return test_producer_consumer_ingress_order_test(
			worker_flow_based_pipeline);

/* Queue based producer consumer ingress order test */

test_queue_producer_consumer_ingress_order_test(void)

	return test_producer_consumer_ingress_order_test(
			worker_group_based_pipeline);
static struct unit_test_suite eventdev_octeontx_testsuite = {
	.suite_name = "eventdev octeontx unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq),
		TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test),
		/* Tests with dequeue timeout */
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASES_END() /**< NULL terminate unit test array */

test_eventdev_octeontx(void)

	return unit_test_suite_runner(&eventdev_octeontx_testsuite);

REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);