X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=test%2Ftest%2Ftest_eventdev_octeontx.c;h=fffb4a80ed6e049aae1c37cc7c1f61017a3911ee;hb=f1ae15bac8aaa75aeb4fa886fd4d74d34c373193;hp=cf79242f60995e910fee69419c8e75b5d57c43c8;hpb=19e2646879c768afaa6c8d7ab99336ff12cdb3bd;p=dpdk.git

diff --git a/test/test/test_eventdev_octeontx.c b/test/test/test_eventdev_octeontx.c
index cf79242f60..fffb4a80ed 100644
--- a/test/test/test_eventdev_octeontx.c
+++ b/test/test/test_eventdev_octeontx.c
@@ -115,7 +115,7 @@ testsuite_setup(void)
 	if (evdev < 0) {
 		printf("%d: Eventdev %s not found - creating.\n",
 				__LINE__, eventdev_name);
-		if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
+		if (rte_vdev_init(eventdev_name, NULL) < 0) {
 			printf("Error creating eventdev %s\n", eventdev_name);
 			return TEST_FAILED;
 		}
@@ -194,6 +194,11 @@ _eventdev_setup(int mode)
 	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
 
 	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+		if (rte_event_queue_count(evdev) > 8) {
+			printf("test expects a unique priority per queue\n");
+			return -ENOTSUP;
+		}
+
 		/* Configure event queues (0 to n) with
 		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
 		 * RTE_EVENT_DEV_PRIORITY_LOWEST
@@ -244,6 +249,12 @@ eventdev_setup_priority(void)
 	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
 }
 
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
 static inline void
 eventdev_teardown(void)
 {
@@ -1098,6 +1109,216 @@ test_multi_port_queue_parallel_to_parallel(void)
 	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
 			RTE_SCHED_TYPE_PARALLEL);
 }
+
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	rte_atomic32_t *total_events = param->total_events;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.sub_event_type == 255) { /* last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			rte_atomic32_sub(total_events, 1);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.sub_event_type++;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+	return 0;
+}
+
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+	uint8_t nr_ports;
+	int ret;
+
+	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
+			rte_event_port_count(evdev), rte_lcore_count() - 1);
+		return TEST_SUCCESS;
+	}
+
+	/* Inject events with m->seqn from 0 to total_events */
+	ret = inject_events(
+		0x1 /* flow_id */,
+		RTE_EVENT_TYPE_CPU /* event_type */,
+		0 /* sub_event_type (stage 0) */,
+		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+		0 /* queue */,
+		0 /* port */,
+		MAX_EVENTS /* events */);
+	if (ret)
+		return TEST_FAILED;
+
+	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+					 0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages and random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	uint8_t nr_queues = rte_event_queue_count(evdev);
+	rte_atomic32_t *total_events = param->total_events;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.queue_id == nr_queues - 1) { /* last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			rte_atomic32_sub(total_events, 1);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.queue_id++;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+	return 0;
+}
+
+/* Queue based pipeline with maximum stages and random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	uint8_t nr_queues = rte_event_queue_count(evdev);
+	rte_atomic32_t *total_events = param->total_events;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.queue_id == nr_queues - 1) { /* last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			rte_atomic32_sub(total_events, 1);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.queue_id++;
+			ev.sub_event_type = rte_rand() % 256;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+	return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages and random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_ordered_flow_producer(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint8_t port = param->port;
+	struct rte_mbuf *m;
+	int counter = 0;
+
+	while (counter < NUM_PACKETS) {
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		if (m == NULL)
+			continue;
+
+		m->seqn = counter++;
+
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		ev.flow_id = 0x1; /* Generate a fat flow */
+		ev.sub_event_type = 0;
+		/* Inject the new event */
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.event_type = RTE_EVENT_TYPE_CPU;
+		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+		ev.queue_id = 0;
+		ev.mbuf = m;
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+
+	return 0;
+}
+
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+	uint8_t nr_ports;
+
+	nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+	if (rte_lcore_count() < 3 || nr_ports < 2) {
+		printf("### Not enough cores for %s test.\n", __func__);
+		return TEST_SUCCESS;
+	}
+
+	launch_workers_and_wait(worker_ordered_flow_producer, fn,
+			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+	/* Check whether the event ingress order is maintained */
+	return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+	return test_producer_consumer_ingress_order_test(
+			worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void) +{ + return test_producer_consumer_ingress_order_test( + worker_group_based_pipeline); +} + static struct unit_test_suite eventdev_octeontx_testsuite = { .suite_name = "eventdev octeontx unit test suite", .setup = testsuite_setup, @@ -1155,6 +1376,21 @@ static struct unit_test_suite eventdev_octeontx_testsuite = { test_multi_port_queue_parallel_to_ordered), TEST_CASE_ST(eventdev_setup, eventdev_teardown, test_multi_port_queue_parallel_to_parallel), + TEST_CASE_ST(eventdev_setup, eventdev_teardown, + test_multi_port_flow_max_stages_random_sched_type), + TEST_CASE_ST(eventdev_setup, eventdev_teardown, + test_multi_port_queue_max_stages_random_sched_type), + TEST_CASE_ST(eventdev_setup, eventdev_teardown, + test_multi_port_mixed_max_stages_random_sched_type), + TEST_CASE_ST(eventdev_setup, eventdev_teardown, + test_flow_producer_consumer_ingress_order_test), + TEST_CASE_ST(eventdev_setup, eventdev_teardown, + test_queue_producer_consumer_ingress_order_test), + /* Tests with dequeue timeout */ + TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown, + test_multi_port_flow_ordered_to_atomic), + TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown, + test_multi_port_queue_ordered_to_atomic), TEST_CASES_END() /**< NULL terminate unit test array */ } };
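
Note on the stage-forwarding pattern used by the max-stages workers added above: the 8-bit sub_event_type field doubles as a stage counter (hence the "== 255" last-stage check, giving 256 stages), and every hop draws a scheduling type uniformly with rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1), which covers ORDERED/ATOMIC/PARALLEL because those constants are 0, 1 and 2 in rte_eventdev.h. The following is a minimal standalone sketch of that control flow, not part of the patch: struct fake_event, LAST_STAGE and the use of rand() are hypothetical stand-ins with no DPDK dependency.

/* Minimal sketch of the max-stages forwarding loop (no DPDK required). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SCHED_TYPE_PARALLEL 2	/* mirrors RTE_SCHED_TYPE_PARALLEL (= 2) */
#define LAST_STAGE 255		/* sub_event_type is 8-bit: stages 0..255 */

/* Hypothetical stand-in for the struct rte_event fields used here */
struct fake_event {
	uint8_t sub_event_type;
	uint8_t sched_type;
};

int
main(void)
{
	struct fake_event ev = { .sub_event_type = 0, .sched_type = 0 };
	unsigned int hops = 0;

	/* Forward the event stage by stage until the last stage (255),
	 * picking a scheduling type in [0, 2] at random on every hop,
	 * as the workers do with rte_rand() before OP_FORWARD.
	 */
	while (ev.sub_event_type != LAST_STAGE) {
		ev.sub_event_type++;
		ev.sched_type = rand() % (SCHED_TYPE_PARALLEL + 1);
		hops++;
	}

	printf("event traversed %u stages, final sched_type=%u\n",
	       hops, ev.sched_type);
	return 0;
}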