/*
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>

#include "test.h"
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS (16 * 1024)

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

/* Enqueue-time attributes stored in the mbuf data area for later validation */
struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];
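/*
 * The ingress-order tests below record the m->seqn of every completed event
 * in seqn_list; seqn_list_check() then verifies that atomic/ordered
 * scheduling delivered the events in their original enqueue order.
 */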
static inline void
seqn_list_init(void) /* helper name reconstructed from context */
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return TEST_FAILED;

	seqn_list[seqn_list_index++] = val;
	return TEST_SUCCESS;
}
static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			printf("Seqn mismatch %d %d\n", seqn_list[i], i);
			return TEST_FAILED;
		}
	}
	return TEST_SUCCESS;
}
struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};
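/*
 * Probe for the octeontx event device and, if it is not already present,
 * create it as a virtual device so the suite can run without prior
 * EAL --vdev arguments.
 */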
static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		printf("%d: Eventdev %s not found - creating.\n",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			printf("Error creating eventdev %s\n", eventdev_name);
			return TEST_FAILED;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			printf("Error finding newly created eventdev\n");
			return TEST_FAILED;
		}
	}

	return TEST_SUCCESS;
}
static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}
static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}
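/*
 * Setup variants used by _eventdev_setup(): default configuration, one
 * distinct priority per queue, and per-dequeue timeout support.
 */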
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy the pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
			MAX_EVENTS /* element count (assumed; original value elided) */,
			0 /*MBUF_CACHE_SIZE*/,
			0 /* priv size */,
			512, /* Use very small mbufs */
			rte_socket_id());
	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);
	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			printf("test expects a unique priority per queue\n");
			return TEST_FAILED;
		}

		/* Configure event queues (0 to n) from
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	}
	/* Configure event ports */
	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}
static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}
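/*
 * Stash the enqueue-time attributes of an event in the head of its mbuf
 * data area so that validate_event() can compare them against what the
 * scheduler actually delivered.
 */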
static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}
static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
					ev.mbuf->seqn);
	}
	return 0;
}
static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return TEST_FAILED;
	}
	return ret;
}
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);
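/*
 * Drain total_events events from the given port, running validate_event()
 * and the optional test-specific callback on each one. The bounded
 * forward_progress_cnt turns a stalled scheduler into a test failure
 * instead of an endless poll loop. Typical use:
 *	consume_events(0, MAX_EVENTS, validate_simple_enqdeq);
 */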
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			printf("Detected deadlock\n");
			return TEST_FAILED;
		}
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return TEST_FAILED;

		/* Check test specific validation */
		if (fn != NULL) {
			ret = fn(index, port, &ev);
			TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}
		index++;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}
	return check_excess_events(port);
}
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
			ev->mbuf->seqn);
	return 0;
}
static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,
			sched_type,
			0 /* queue */,
			0 /* port */,
			MAX_EVENTS /* events */);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}
static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue through a single event port (port 0), verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}
/*
 * Inject 0..MAX_EVENTS events over 0..queue_count queues with a modulus
 * operation:
 *
 * For example, inject 32 events over queues 0..7:
 *	enqueue events 0, 8, 16, 24 in queue 0
 *	enqueue events 1, 9, 17, 25 in queue 1
 *	...
 *	enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events arrive in
 * 0,8,16,24,1,9,17,25...,7,15,23,31 order, from queue 0 (highest priority)
 * to queue 7 (lowest priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
		ev->mbuf->seqn, index, expected_val, range,
		queue_count, MAX_EVENTS);
	return 0;
}
static int
test_multi_queue_priority(void)
{
	struct rte_mbuf *m;
	uint8_t queue;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for the validation logic */
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}
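/*
 * Worker body shared by the multi-port tests: every worker dequeues from
 * its own port, validates the event against the attributes stored in the
 * mbuf, frees the mbuf and decrements the shared event counter.
 */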
static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}
static int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			printf("\r%s: events %d\n", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			printf("%s: No progress for 10 seconds, suspected deadlock (%d)\n",
				__func__, rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return TEST_FAILED;
		}
	}
	rte_eal_mp_wait_lcore();
	return TEST_SUCCESS;
}
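/*
 * Launch one master worker and nb_workers - 1 slave workers, one per event
 * port, each on its own lcore, then block until they all finish or
 * wait_workers_to_join() declares a deadlock. A random per-dequeue timeout
 * is handed to the workers that honour dequeue_tmo_ticks.
 */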
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
		int (*slave_workers)(void *), uint32_t total_events,
		uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return TEST_FAILED;

	ret = rte_event_dequeue_timeout_ticks(evdev,
			rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret)
		return TEST_FAILED;

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;

	w_lcore = rte_get_next_lcore(
			-1 /* start core */,
			1 /* skip master */,
			0 /* wrap */);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;

		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return TEST_FAILED;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
			worker_multi_port_fn, total_events,
			nr_ports, 0xff /* invalid */);
}
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}
/*
 * Link queue x to port x and check the correctness of the link by verifying
 * queue_id == x on dequeue on the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /* flow_id (assumed; original value elided) */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;
	}

	/* Verify the events were delivered from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1 and verify the link connections on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&nr_queues), "Queue count get failed");
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		printf("%s: Not enough ports to test ports=%d\n",
				__func__, nr_ports);
		return TEST_SUCCESS;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link even-numbered queues to port 0 and odd-numbered queues to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /* flow_id (assumed; original value elided) */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;
	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;

	return TEST_SUCCESS;
}
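/*
 * Two-stage pipeline keyed on ev.sub_event_type: stage 0 events are
 * forwarded to stage 1 with the requested sched_type, stage 1 events are
 * recorded in seqn_list and retired. Ordering across the stage transition
 * is what the ordered/atomic-to-atomic tests verify.
 */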
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to an atomic flow to maintain the ordering */
			ev.flow_id = 0x2; /* flow_id value assumed */
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.sub_event_type = %d\n",
				ev.sub_event_type);
			return TEST_FAILED;
		}
	}
	return 0;
}
static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /* flow_id (assumed; original value elided) */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
			worker_flow_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check that event ordering is maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}
/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
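/*
 * Same two-stage pipeline as worker_flow_based_pipeline(), but the stages
 * are distinguished by event queue (queue 0 -> queue 1) instead of by
 * sub_event_type.
 */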
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 (group 0) */
		if (ev.queue_id == 0) {
			/* Move to an atomic flow to maintain the ordering */
			ev.flow_id = 0x2; /* flow_id value assumed */
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.queue_id = %d\n", ev.queue_id);
			return TEST_FAILED;
		}
	}
	return 0;
}
static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
			__func__, queue_count, nr_ports,
			rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /* flow_id (assumed; original value elided) */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
			worker_group_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check that event ordering is maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}
static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
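/*
 * Max-stage workers: each event is forwarded through the maximum number of
 * stages (255 sub_event_type increments here, one hop per queue in the
 * queue-based variant below) with a freshly randomized sched_type at every
 * hop before it is finally retired.
 */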
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}
static inline int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Injects events with m->seqn=0 to total_events */
	ret = inject_events(
		0x1 /* flow_id (assumed; original value elided) */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return TEST_FAILED;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
			0xff /* invalid */);
}
/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}
static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}
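/*
 * Producer half of the ingress-order tests: enqueue NUM_PACKETS mbufs on a
 * single ordered flow with increasing m->seqn, while consumer workers (the
 * flow/queue pipeline workers above) retire them through an atomic stage so
 * the original enqueue order can be checked afterwards.
 */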
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}
static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		printf("### Not enough cores for %s test.\n", __func__);
		return TEST_SUCCESS;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check that the ingress order is maintained */
	return seqn_list_check(NUM_PACKETS);
}
/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}
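/*
 * Each case gets its own setup/teardown pair, so every test runs against a
 * freshly configured device and mempool.
 */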
static struct unit_test_suite eventdev_octeontx_testsuite = {
	.suite_name = "eventdev octeontx unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq),
		TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test),
		/* Tests with dequeue timeout */
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
static int
test_eventdev_octeontx(void)
{
	return unit_test_suite_runner(&eventdev_octeontx_testsuite);
}

REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);