vdev: remove eal prefix
diff --git a/test/test/test_eventdev_octeontx.c b/test/test/test_eventdev_octeontx.c
index 03590da..9e95722 100644
--- a/test/test/test_eventdev_octeontx.c
+++ b/test/test/test_eventdev_octeontx.c
@@ -115,7 +115,7 @@ testsuite_setup(void)
        if (evdev < 0) {
                printf("%d: Eventdev %s not found - creating.\n",
                                __LINE__, eventdev_name);
-               if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
+               if (rte_vdev_init(eventdev_name, NULL) < 0) {
                        printf("Error creating eventdev %s\n", eventdev_name);
                        return TEST_FAILED;
                }
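
The rename is mechanical: rte_vdev_init() takes the same (name, args)
pair as the old rte_eal_vdev_init(). A minimal sketch of the
look-up-or-create pattern testsuite_setup() uses, assuming
rte_vdev_init() as declared in rte_vdev.h at this point in the tree;
the helper name create_sso_vdev is hypothetical:

    #include <rte_eventdev.h>
    #include <rte_vdev.h>

    /* Hypothetical helper: return the event device id for "name",
     * creating the vdev on demand as testsuite_setup() does above. */
    static int
    create_sso_vdev(const char *name)
    {
            int evdev = rte_event_dev_get_dev_id(name);

            if (evdev < 0 && rte_vdev_init(name, NULL) < 0)
                    return -1; /* vdev creation failed */

            return rte_event_dev_get_dev_id(name);
    }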
@@ -244,6 +244,12 @@ eventdev_setup_priority(void)
        return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
 }
 
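+/* Same as eventdev_setup() but configure the device with a dequeue
+ * timeout (TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT in _eventdev_setup()). */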
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+       return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
 static inline void
 eventdev_teardown(void)
 {
@@ -1203,6 +1209,111 @@ test_multi_port_queue_max_stages_random_sched_type(void)
        return launch_multi_port_max_stages_random_sched_type(
                worker_queue_based_pipeline_max_stages_rand_sched_type);
 }
+
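+/* Worker for the mixed pipeline: forward each event to the next queue
+ * with a random sub_event_type and sched type; consume at the last
+ * queue. */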
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+       struct test_core_param *param = arg;
+       struct rte_event ev;
+       uint16_t valid_event;
+       uint8_t port = param->port;
+       uint8_t nr_queues = rte_event_queue_count(evdev);
+       rte_atomic32_t *total_events = param->total_events;
+
+       while (rte_atomic32_read(total_events) > 0) {
+               valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+               if (!valid_event)
+                       continue;
+
+               if (ev.queue_id == nr_queues - 1) { /* Last stage */
+                       rte_pktmbuf_free(ev.mbuf);
+                       rte_atomic32_sub(total_events, 1);
+               } else {
+                       ev.event_type = RTE_EVENT_TYPE_CPU;
+                       ev.queue_id++;
+                       ev.sub_event_type = rte_rand() % 256;
+                       ev.sched_type =
+                               rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+                       ev.op = RTE_EVENT_OP_FORWARD;
+                       rte_event_enqueue_burst(evdev, port, &ev, 1);
+               }
+       }
+       return 0;
+}
+
+/* Mixed queue and flow based pipeline with maximum stages and random
+ * sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+       return launch_multi_port_max_stages_random_sched_type(
+               worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
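+/* Producer: inject NUM_PACKETS as NEW ordered events on a single
+ * flow of queue 0. */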
+static int
+worker_ordered_flow_producer(void *arg)
+{
+       struct test_core_param *param = arg;
+       uint8_t port = param->port;
+       struct rte_mbuf *m;
+       int counter = 0;
+
+       while (counter < NUM_PACKETS) {
+               m = rte_pktmbuf_alloc(eventdev_test_mempool);
+               if (m == NULL)
+                       continue;
+
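+               /* Stamp the mbuf with its ingress order for later verification */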
+               m->seqn = counter++;
+
+               struct rte_event ev = {.event = 0, .u64 = 0};
+
+               ev.flow_id = 0x1; /* Generate a fat flow */
+               ev.sub_event_type = 0;
+               /* Inject the new event */
+               ev.op = RTE_EVENT_OP_NEW;
+               ev.event_type = RTE_EVENT_TYPE_CPU;
+               ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+               ev.queue_id = 0;
+               ev.mbuf = m;
+               rte_event_enqueue_burst(evdev, port, &ev, 1);
+       }
+
+       return 0;
+}
+
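+/* Run one ordered-flow producer plus the given consumer worker on the
+ * remaining ports, then verify that events were seen in ingress order. */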
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+       uint8_t nr_ports;
+
+       nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+       if (rte_lcore_count() < 3 || nr_ports < 2) {
+               printf("### Not enough cores for %s test.\n", __func__);
+               return TEST_SUCCESS;
+       }
+
+       launch_workers_and_wait(worker_ordered_flow_producer, fn,
+                               NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+       /* Check whether the event ingress order was maintained */
+       return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+       return test_producer_consumer_ingress_order_test(
+                               worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+       return test_producer_consumer_ingress_order_test(
+                               worker_group_based_pipeline);
+}
+
 static struct unit_test_suite eventdev_octeontx_testsuite  = {
        .suite_name = "eventdev octeontx unit test suite",
        .setup = testsuite_setup,
@@ -1264,6 +1375,17 @@ static struct unit_test_suite eventdev_octeontx_testsuite  = {
                        test_multi_port_flow_max_stages_random_sched_type),
                TEST_CASE_ST(eventdev_setup, eventdev_teardown,
                        test_multi_port_queue_max_stages_random_sched_type),
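+               /* Mixed pipeline and ingress order tests */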
+               TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+                       test_multi_port_mixed_max_stages_random_sched_type),
+               TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+                       test_flow_producer_consumer_ingress_order_test),
+               TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+                       test_queue_producer_consumer_ingress_order_test),
+               /* Tests with dequeue timeout */
+               TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
+                       test_multi_port_flow_ordered_to_atomic),
+               TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
+                       test_multi_port_queue_ordered_to_atomic),
                TEST_CASES_END() /**< NULL terminate unit test array */
        }
 };