m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
update_event_and_validation_attr(m, &ev, flow_id, event_type,
sub_event_type, sched_type, queue, port);
rte_event_enqueue_burst(evdev, port, &ev, 1);
valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
RTE_TEST_ASSERT_SUCCESS(valid_event,
- "Unexpected valid event=%d", ev.mbuf->seqn);
+ "Unexpected valid event=%d",
+ *rte_event_pmd_selftest_seqn(ev.mbuf));
}
return 0;
}
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
- index, ev->mbuf->seqn);
+ RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+ "index=%d != seqn=%d", index,
+ *rte_event_pmd_selftest_seqn(ev->mbuf));
return 0;
}
expected_val += ev->queue_id;
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
- "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
- ev->mbuf->seqn, index, expected_val, range,
- queue_count, MAX_EVENTS);
+ RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
+ queue_count, MAX_EVENTS);
return 0;
}
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
static inline int
-launch_workers_and_wait(int (*master_worker)(void *),
- int (*slave_workers)(void *), uint32_t total_events,
+launch_workers_and_wait(int (*main_worker)(void *),
+ int (*worker)(void *), uint32_t total_events,
uint8_t nb_workers, uint8_t sched_type)
{
uint8_t port = 0;
ret = rte_event_dequeue_timeout_ticks(evdev,
rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
- if (ret)
+ if (ret) {
+ free(param);
return -1;
+ }
param[0].total_events = &atomic_total_events;
param[0].sched_type = sched_type;
w_lcore = rte_get_next_lcore(
/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
- rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+ rte_eal_remote_launch(main_worker, &param[0], w_lcore);
for (port = 1; port < nb_workers; port++) {
param[port].total_events = &atomic_total_events;
param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
rte_smp_wmb();
w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
- rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ rte_eal_remote_launch(worker, &param[port], w_lcore);
}
ret = wait_workers_to_join(w_lcore, &atomic_total_events);
nr_ports, 0xff /* invalid */);
}
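+/* Stop flush callback: count every RTE_EVENT_TYPE_CPU event drained on stop */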
+static void
+flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+ unsigned int *count = arg;
+
+ RTE_SET_USED(dev_id);
+ if (event.event_type == RTE_EVENT_TYPE_CPU)
+ *count = *count + 1;
+}
+
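+/* Inject events, stop the device and verify that every injected event
+ * reached the registered stop flush callback.
+ */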
+static int
+test_dev_stop_flush(void)
+{
+ unsigned int total_events = MAX_EVENTS, count = 0;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+ if (ret)
+ return -2;
+ rte_event_dev_stop(evdev);
+ ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+ if (ret)
+ return -3;
+ RTE_TEST_ASSERT_EQUAL(total_events, count,
+ "count mismatch total_events=%d count=%d",
+ total_events, count);
+ return 0;
+}
+
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
struct rte_event *ev)
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
if (m == NULL)
continue;
- m->seqn = counter++;
+ *rte_event_pmd_selftest_seqn(m) = counter++;
struct rte_event ev = {.event = 0, .u64 = 0};
test_simple_enqdeq_parallel);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_single_port_deq);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_dev_stop_flush);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_multi_port_deq);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,