diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
index 3866ba9..528f99d 100644
--- a/drivers/event/octeontx/ssovf_evdev_selftest.c
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -300,7 +300,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-               m->seqn = i;
+               *rte_event_pmd_selftest_seqn(m) = i;
                update_event_and_validation_attr(m, &ev, flow_id, event_type,
                        sub_event_type, sched_type, queue, port);
                rte_event_enqueue_burst(evdev, port, &ev, 1);
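
Note: the `seqn` field was removed from struct rte_mbuf, so the selftest now
keeps its sequence number in an mbuf dynamic field reached through
rte_event_pmd_selftest_seqn(). A minimal sketch of how such an accessor is
built on the dynamic-field API (the exact typedef and offset-variable names
are assumptions based on the eventdev PMD header):

    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>

    typedef uint32_t rte_event_pmd_selftest_seqn_t;
    extern int rte_event_pmd_selftest_seqn_dynfield_offset;

    /* Resolve a pointer to the per-mbuf selftest sequence number. */
    static inline rte_event_pmd_selftest_seqn_t *
    rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
    {
            return RTE_MBUF_DYNFIELD(mbuf,
                    rte_event_pmd_selftest_seqn_dynfield_offset,
                    rte_event_pmd_selftest_seqn_t *);
    }

The offset is registered once (rte_mbuf_dynfield_register()) before the
selftest runs; every former `m->seqn` access below becomes a dereference of
this pointer.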
@@ -320,7 +320,8 @@ check_excess_events(uint8_t port)
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
 
                RTE_TEST_ASSERT_SUCCESS(valid_event,
-                               "Unexpected valid event=%d", ev.mbuf->seqn);
+                       "Unexpected valid event=%d",
+                       *rte_event_pmd_selftest_seqn(ev.mbuf));
        }
        return 0;
 }
@@ -425,8 +426,9 @@ static int
 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
 {
        RTE_SET_USED(port);
-       RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
-                       index, ev->mbuf->seqn);
+       RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+               "index=%d != seqn=%d", index,
+               *rte_event_pmd_selftest_seqn(ev->mbuf));
        return 0;
 }
 
@@ -509,10 +511,10 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
 
        expected_val += ev->queue_id;
        RTE_SET_USED(port);
-       RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
-       "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
-                       ev->mbuf->seqn, index, expected_val, range,
-                       queue_count, MAX_EVENTS);
+       RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+               "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+               *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
+               queue_count, MAX_EVENTS);
        return 0;
 }
 
@@ -537,7 +539,7 @@ test_multi_queue_priority(void)
                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-               m->seqn = i;
+               *rte_event_pmd_selftest_seqn(m) = i;
                queue = i % queue_count;
                update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
                        0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
@@ -601,8 +603,8 @@ wait_workers_to_join(int lcore, const rte_atomic32_t *count)
 
 
 static inline int
-launch_workers_and_wait(int (*master_worker)(void *),
-                       int (*slave_workers)(void *), uint32_t total_events,
+launch_workers_and_wait(int (*main_worker)(void *),
+                       int (*worker)(void *), uint32_t total_events,
                        uint8_t nb_workers, uint8_t sched_type)
 {
        uint8_t port = 0;
@@ -624,8 +626,10 @@ launch_workers_and_wait(int (*master_worker)(void *),
 
        ret = rte_event_dequeue_timeout_ticks(evdev,
                rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
-       if (ret)
+       if (ret) {
+               free(param);
                return -1;
+       }
 
        param[0].total_events = &atomic_total_events;
        param[0].sched_type = sched_type;
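
The added free() closes a leak on this early-return path: `param` is
heap-allocated at the top of launch_workers_and_wait(). When a function
accumulates several such resources, the usual C/DPDK idiom is a single
cleanup label; a hypothetical sketch of that shape (not this file's code,
assuming the file's `struct test_core_param` and `evdev` globals):

    static int
    launch_workers_sketch(uint8_t nb_workers)
    {
            struct test_core_param *param;
            uint64_t tmo_ticks;
            int ret = -1;

            param = malloc(sizeof(*param) * nb_workers);
            if (param == NULL)
                    return -1;

            if (rte_event_dequeue_timeout_ticks(evdev,
                            10000000 /* 10ms */, &tmo_ticks))
                    goto out; /* every failure path funnels through one free */

            /* ... launch workers and wait for them to join ... */
            ret = 0;
    out:
            free(param);
            return ret;
    }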
@@ -635,9 +639,9 @@ launch_workers_and_wait(int (*master_worker)(void *),
 
        w_lcore = rte_get_next_lcore(
                        /* start core */ -1,
-                       /* skip master */ 1,
+                       /* skip main */ 1,
                        /* wrap */ 0);
-       rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+       rte_eal_remote_launch(main_worker, &param[0], w_lcore);
 
        for (port = 1; port < nb_workers; port++) {
                param[port].total_events = &atomic_total_events;
@@ -646,7 +650,7 @@ launch_workers_and_wait(int (*master_worker)(void *),
                param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
                rte_smp_wmb();
                w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
-               rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+               rte_eal_remote_launch(worker, &param[port], w_lcore);
        }
 
        ret = wait_workers_to_join(w_lcore, &atomic_total_events);
@@ -686,6 +690,39 @@ test_multi_queue_enq_multi_port_deq(void)
                                        nr_ports, 0xff /* invalid */);
 }
 
+static void
+flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+       unsigned int *count = arg;
+
+       RTE_SET_USED(dev_id);
+       if (event.event_type == RTE_EVENT_TYPE_CPU)
+               *count = *count + 1;
+}
+
+static int
+test_dev_stop_flush(void)
+{
+       unsigned int total_events = MAX_EVENTS, count = 0;
+       int ret;
+
+       ret = generate_random_events(total_events);
+       if (ret)
+               return -1;
+
+       ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+       if (ret)
+               return -2;
+       rte_event_dev_stop(evdev);
+       ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+       if (ret)
+               return -3;
+       RTE_TEST_ASSERT_EQUAL(total_events, count,
+                               "count mismatch total_events=%d count=%d",
+                               total_events, count);
+       return 0;
+}
+
 static int
 validate_queue_to_port_single_link(uint32_t index, uint8_t port,
                        struct rte_event *ev)
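
The new test exercises rte_event_dev_stop_flush_callback_register(): the
registered callback is invoked once for every event still held inside the
device when rte_event_dev_stop() drains it, and registering NULL removes the
callback. The typical application-side use is releasing mbufs so a stop does
not leak them; an illustrative callback of that kind (not part of this
selftest):

    /* Free the mbuf carried by each event drained at device stop. */
    static void
    app_stop_flush(uint8_t dev_id, struct rte_event event, void *arg)
    {
            RTE_SET_USED(dev_id);
            RTE_SET_USED(arg);

            if (event.event_type == RTE_EVENT_TYPE_CPU)
                    rte_pktmbuf_free(event.mbuf);
    }

    /* At init time, before rte_event_dev_stop() can run: */
    ret = rte_event_dev_stop_flush_callback_register(dev_id,
                    app_stop_flush, NULL);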
@@ -868,7 +906,7 @@ worker_flow_based_pipeline(void *arg)
                        ev.op = RTE_EVENT_OP_FORWARD;
                        rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
-                       if (seqn_list_update(ev.mbuf->seqn) == 0) {
+                       if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
                                rte_atomic32_sub(total_events, 1);
                        } else {
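
seqn_list_update() is defined earlier in this file, outside these hunks; it
records each stage-1 sequence number in a global list that is later checked
for ordering. A hypothetical minimal recorder of that shape, to show what the
pipeline tests validate (capacity constant and memory-barrier placement are
assumptions):

    #define NUM_PACKETS (1 << 18)

    static int seqn_list_index;
    static int seqn_list[NUM_PACKETS];

    /* Record an observed sequence number; fail once capacity is exceeded. */
    static int
    seqn_list_update(int val)
    {
            if (seqn_list_index >= NUM_PACKETS)
                    return -1;

            seqn_list[seqn_list_index++] = val;
            rte_smp_wmb(); /* publish the store before counters move on */
            return 0;
    }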
@@ -903,7 +941,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with sequence numbers from 0 to total_events */
        ret = inject_events(
                0x1 /*flow_id */,
                RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1023,7 +1061,7 @@ worker_group_based_pipeline(void *arg)
                        ev.op = RTE_EVENT_OP_FORWARD;
                        rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
-                       if (seqn_list_update(ev.mbuf->seqn) == 0) {
+                       if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
                                rte_atomic32_sub(total_events, 1);
                        } else {
@@ -1065,7 +1103,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with sequence numbers from 0 to total_events */
        ret = inject_events(
                0x1 /*flow_id */,
                RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1202,7 +1240,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with sequence numbers from 0 to total_events */
        ret = inject_events(
                0x1 /*flow_id */,
                RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1324,7 +1362,7 @@ worker_ordered_flow_producer(void *arg)
                if (m == NULL)
                        continue;
 
-               m->seqn = counter++;
+               *rte_event_pmd_selftest_seqn(m) = counter++;
 
                struct rte_event ev = {.event = 0, .u64 = 0};
 
@@ -1412,6 +1450,8 @@ test_eventdev_octeontx(void)
                        test_simple_enqdeq_parallel);
        OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_multi_queue_enq_single_port_deq);
+       OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+                       test_dev_stop_flush);
        OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
                        test_multi_queue_enq_multi_port_deq);
        OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,