/*
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>

#include "test.h"
#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS (16 * 1024)
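
/*
 * NUM_PACKETS bounds seqn_list[], which records sequence numbers for the
 * ordering checks; tests inject at most MAX_EVENTS events and the
 * RTE_BUILD_BUG_ON() below enforces NUM_PACKETS >= MAX_EVENTS.
 */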
static int evdev;
static struct rte_mempool *eventdev_test_mempool;

/* Attributes stored in the mbuf data area at enqueue time and checked
 * again in validate_event() after dequeue.
 */
struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
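
/*
 * Record one dequeued sequence number per call; seqn_list_check() later
 * verifies that the recorded order is exactly 0..limit-1.
 */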
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return TEST_FAILED;

	seqn_list[seqn_list_index++] = val;
seqn_list_check(int limit)
{
	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			printf("Seqn mismatch %d %d\n", seqn_list[i], i);
struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		printf("%d: Eventdev %s not found - creating.\n",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			printf("Error creating eventdev %s\n", eventdev_name);
			return TEST_FAILED;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			printf("Error finding newly created eventdev\n");
			return TEST_FAILED;
		}
	}
	return TEST_SUCCESS;
}
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}
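
/*
 * Build a device configuration that requests the maximum resources
 * advertised by the driver and the minimum supported dequeue timeout.
 */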
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}
enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy the pool for each test case to keep it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
					0 /*MBUF_CACHE_SIZE*/,
					512, /* Use very small mbufs */

	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			printf("test expects a unique priority per queue\n");
			return TEST_FAILED;
		}

		/* Configure event queues (0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	}

	/* Configure event ports */
	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}
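
/*
 * Per-test setup wrappers: default configuration, one unique priority per
 * queue, and per-dequeue-timeout mode.
 */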
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in the mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}
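
/*
 * Allocate mbufs from the test mempool, stamp them with the validation
 * attributes above and enqueue them as NEW events through the given port.
 */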
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		update_event_and_validation_attr(m, &ev, flow_id, event_type,
				sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events; try a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}
typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);
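
/*
 * Dequeue up to total_events events from the given port, validate the
 * attributes stored at enqueue time and run the optional test-specific
 * callback; give up if no forward progress is made for a long stretch.
 */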
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			printf("Detected deadlock\n");
			return TEST_FAILED;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
			index++;
		}

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}
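
/*
 * Simple enq/deq test: inject MAX_EVENTS events with the requested sched
 * type and verify on port 0 that they are dequeued in injection order
 * (mbuf->seqn == dequeue index).
 */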
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
			ev->mbuf->seqn);
	return 0;
}

test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify the
 * enqueued event attributes.
 */
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}
/*
 * Inject 0..MAX_EVENTS events over queues 0..queue_count-1 using a modulo
 * mapping.
 *
 * For example, inject 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come in the order
 * 0,8,16,24,1,9,17,25,..,7,15,23,31, i.e. from queue 0 (highest priority)
 * to queue 7 (lowest priority).
 */
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
		ev->mbuf->seqn, index, expected_val, range,
		queue_count, MAX_EVENTS);
	return 0;
}
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for the priority validation logic */
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}
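
/*
 * Worker used by the multi-port dequeue tests: drain events from the
 * worker's own port, validate the stored attributes, free the mbuf and
 * decrement the shared event counter.
 */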
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			printf("\r%s: events %d\n", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			printf("%s: No schedules for seconds, deadlock (%d)\n",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			return TEST_FAILED;
		}
	}
	rte_eal_mp_wait_lcore();
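
/*
 * Launch one lcore per event port: the first worker runs master_worker and
 * the remaining ones run slave_workers; then wait for the shared event
 * counter to drain.
 */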
launch_workers_and_wait(int (*master_worker)(void *),
		int (*slave_workers)(void *), uint32_t total_events,
		uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	rte_atomic32_set(&atomic_total_events, total_events);

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return TEST_FAILED;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;

	w_lcore = rte_get_next_lcore(
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;

		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);

	return ret;
}
/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the
 * enqueued event attributes.
 */
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return TEST_FAILED;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
			worker_multi_port_fn, total_events,
			nr_ports, 0xff /* invalid */);
}
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	TEST_ASSERT_EQUAL(port, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by
 * verifying queue_id == x on dequeue from port x.
 */
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &port_count), "Port count get failed");

	/* Unlink all connections created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),

			total_events /* events */);
	}

	/* Verify that each port receives events only from its linked queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
			"queue mismatch enq=%d deq=%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the links on dequeue.
 */
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &nr_queues), "Queue count get failed");
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		printf("%s: Not enough ports to test ports=%d\n",
				__func__, nr_ports);
		return TEST_SUCCESS;
	}

	/* Unlink all connections created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
				port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
				queue, port);

			rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),

			total_events /* events */);

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;
	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
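
/*
 * Two-stage pipeline keyed on sub_event_type: stage 0 events are forwarded
 * to stage 1 with the requested sched type; stage 1 events have their
 * sequence number recorded in seqn_list before being freed and counted.
 */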
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.sub_event_type = %d\n",
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn from 0 to total_events */
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type (stage 0) */,

			total_events /* events */);

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
			worker_flow_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order is maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}
/* Multi port ordered to atomic transaction */
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ATOMIC);
}

test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ORDERED);
}

test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_PARALLEL);
}

test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ATOMIC);
}

test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ORDERED);
}

test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_PARALLEL);
}

test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ATOMIC);
}

test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ORDERED);
}

test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_PARALLEL);
}
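
/*
 * Same two-stage pipeline as worker_flow_based_pipeline(), but staged on
 * queue_id: events in queue 0 are forwarded to queue 1, and events from
 * queue 1 are recorded in seqn_list, freed and counted.
 */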
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 (group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.queue_id = %d\n", ev.queue_id);
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
			__func__, queue_count, nr_ports,
			rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn from 0 to total_events */
	ret = inject_events(
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type (stage 0) */,

			total_events /* events */);

	ret = launch_workers_and_wait(worker_group_based_pipeline,
			worker_group_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order is maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ATOMIC);
}

test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_ORDERED);
}

test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
			RTE_SCHED_TYPE_PARALLEL);
}

test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ATOMIC);
}

test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ORDERED);
}

test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_PARALLEL);
}

test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ATOMIC);
}

test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ORDERED);
}

test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_PARALLEL);
}
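
/*
 * 256-stage flow pipeline: every dequeued event is forwarded with a random
 * sched type and an incremented sub_event_type until sub_event_type
 * reaches 255, at which point the event is freed and counted.
 */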
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn from 0 to MAX_EVENTS */
	ret = inject_events(
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type (stage 0) */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,

			MAX_EVENTS /* events */);

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
			0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
			worker_flow_based_pipeline_max_stages_rand_sched_type);
}
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
			worker_queue_based_pipeline_max_stages_rand_sched_type);
}
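
/*
 * Mixed pipeline: like the queue-based worker above, but each forwarded
 * event also gets a random sub_event_type and a random sched type on its
 * way to the next queue.
 */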
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
			worker_mixed_pipeline_max_stages_rand_sched_type);
}
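
/*
 * Producer worker: allocate NUM_PACKETS mbufs, stamp each with an
 * increasing seqn and enqueue them as NEW, ORDERED events on a single
 * flow through the producer's port.
 */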
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		printf("### Not enough cores for %s test.\n", __func__);
		return TEST_SUCCESS;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);

	/* Check whether the event order is maintained */
	return seqn_list_check(NUM_PACKETS);
}
/* Flow based producer consumer ingress order test */
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
			worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
			worker_group_based_pipeline);
}
static struct unit_test_suite eventdev_octeontx_testsuite = {
	.suite_name = "eventdev octeontx unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq),
		TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test),
		/* Tests with dequeue timeout */
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

test_eventdev_octeontx(void)
{
	return unit_test_suite_runner(&eventdev_octeontx_testsuite);
}

REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);