m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
update_event_and_validation_attr(m, &ev, flow_id, event_type,
sub_event_type, sched_type,
queue, port);
RTE_TEST_ASSERT_SUCCESS(valid_event,
"Unexpected valid event=%d",
- ev.mbuf->seqn);
+ *rte_event_pmd_selftest_seqn(ev.mbuf));
}
return 0;
}
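Throughout this patch, the removed mbuf seqn field is replaced by a per-mbuf dynamic field reached through rte_event_pmd_selftest_seqn(). For readers unfamiliar with that helper, here is a minimal sketch of how such an accessor is typically built on the mbuf dynamic field API; the typedef and offset-variable names are assumptions mirroring the call sites above, not a quote of the upstream header:

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Offset into the mbuf where the selftest sequence number lives;
 * filled in by the registration sketch shown after the next hunk. */
extern int rte_event_pmd_selftest_seqn_dynfield_offset;

typedef uint32_t rte_event_pmd_selftest_seqn_t;

/* Return a pointer to this mbuf's sequence number so call sites can
 * both write (*rte_event_pmd_selftest_seqn(m) = i) and read it. */
static inline rte_event_pmd_selftest_seqn_t *
rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
			rte_event_pmd_selftest_seqn_dynfield_offset,
			rte_event_pmd_selftest_seqn_t *);
}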
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
- index, ev->mbuf->seqn);
+ RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+ "index=%d != seqn=%d",
+ index, *rte_event_pmd_selftest_seqn(ev->mbuf));
return 0;
}
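The accessor above only works once the dynamic field has been registered with the mbuf layer, which the selftest setup must do before the first sequence number is written. A hedged sketch of that one-time registration step, assuming the field name and the usual register-or-fail pattern:

int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

/* Register the selftest seqn dynamic field once, before any
 * *rte_event_pmd_selftest_seqn(m) = ... write; returns 0 on success,
 * -1 if the mbuf has no room left for the field. */
static int
selftest_seqn_dynfield_register(void)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};

	rte_event_pmd_selftest_seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&desc);
	return rte_event_pmd_selftest_seqn_dynfield_offset < 0 ? -1 : 0;
}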
expected_val += ev->queue_id;
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
- "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
- ev->mbuf->seqn, index, expected_val, range,
- queue_count, MAX_EVENTS);
+ RTE_TEST_ASSERT_EQUAL(
+ *rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,
+ range, queue_count, MAX_EVENTS);
return 0;
}
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL,
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+ if (seqn_list_update(seqn) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
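For context on the check above: seqn_list_update() records each stage-1 sequence number so ordering can be verified after all events are drained. Its definition is outside this hunk; a plausible shape, assuming a global list sized to MAX_EVENTS, would be:

static int seqn_list_index;
static int seqn_list[MAX_EVENTS];

/* Append an observed sequence number; returns 0 on success (matching
 * the == 0 check above) or -1 on overflow. The write barrier makes
 * the new slot visible to the lcore that later walks the list. */
static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= MAX_EVENTS)
		return -1;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return 0;
}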
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+ if (seqn_list_update(seqn) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
if (m == NULL)
continue;
- m->seqn = counter++;
+ *rte_event_pmd_selftest_seqn(m) = counter++;
struct rte_event ev = {.event = 0, .u64 = 0};