		const flow_id_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
-		m->seqn = producer_flow_seq[flow]++;
+		*order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
		order_flow_id_save(t, flow, m, &ev);
		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
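For reference, RTE_MBUF_DYNFIELD() effectively evaluates to (type)((uintptr_t)(m) + (offset)), so the new line is a plain typed store into the mbuf at the registered offset. A minimal sketch of the write spelled out, assuming t->seqn_dynfield_offset holds the value returned by rte_mbuf_dynfield_register() at setup:

	/* Sketch: the dynfield write above, expanded by hand. */
	seqn_t *seqn = RTE_MBUF_DYNFIELD(m, t->seqn_dynfield_offset, seqn_t *);
	*seqn = producer_flow_seq[flow]++;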
		.size = sizeof(flow_id_t),
		.align = __alignof__(flow_id_t),
	};
+	static const struct rte_mbuf_dynfield seqn_dynfield_desc = {
+		.name = "test_event_dynfield_seqn",
+		.size = sizeof(seqn_t),
+		.align = __alignof__(seqn_t),
+	};
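Worth noting (a sketch, not part of the patch): rte_mbuf_dynfield_register() is idempotent for identical descriptors, so registering the same name, size, and alignment again returns the same offset, while a conflicting re-registration fails with rte_errno set.

	/* Sketch: repeated registration of an identical descriptor. */
	int off1 = rte_mbuf_dynfield_register(&seqn_dynfield_desc);
	int off2 = rte_mbuf_dynfield_register(&seqn_dynfield_desc);
	/* On success off1 == off2; on failure -1 is returned, rte_errno set. */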
	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
		return -rte_errno;
	}
+	t->seqn_dynfield_offset =
+		rte_mbuf_dynfield_register(&seqn_dynfield_desc);
+	if (t->seqn_dynfield_offset < 0) {
+		evt_err("failed to register mbuf seqn field");
+		return -rte_errno;
+	}
+
	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
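If another part of the application needed this field without sharing the descriptor, it could resolve the offset by name. A hypothetical sketch using rte_mbuf_dynfield_lookup() with the name registered above:

	/* Sketch (hypothetical consumer): resolve the field by name only. */
	int off = rte_mbuf_dynfield_lookup("test_event_dynfield_seqn", NULL);
	if (off < 0)
		evt_err("seqn dynfield not registered");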
#define BURST_SIZE 16
typedef uint32_t flow_id_t;
+typedef uint32_t seqn_t;
struct test_order;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	int flow_id_dynfield_offset;
+	int seqn_dynfield_offset;
	struct prod_data prod;
	struct worker_data worker[EVT_MAX_PORTS];
	uint32_t *producer_flow_seq;
	event->mbuf = mbuf;
}
+static inline seqn_t *
+order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf)
+{
+	return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *);
+}
+
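A usage sketch for the new helper (illustrative only; the value 42 and this snippet are not part of the patch):

	/* Sketch: stamp a sequence number and read it back via the helper. */
	struct rte_mbuf *m = rte_pktmbuf_alloc(t->pool);
	if (m != NULL) {
		*order_mbuf_seqn(t, m) = 42;	/* producer-side stamp */
		RTE_VERIFY(*order_mbuf_seqn(t, m) == 42);	/* worker-side read */
		rte_pktmbuf_free(m);
	}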
static inline int
order_nb_event_ports(struct evt_options *opt)
{
{
	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
	/* compare the seqn against expected value */
-	if (ev->mbuf->seqn != expected_flow_seq[flow]) {
+	if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) {
		evt_err("flow=%x seqn mismatch got=%x expected=%x",
-			flow, ev->mbuf->seqn, expected_flow_seq[flow]);
+			flow, *order_mbuf_seqn(t, ev->mbuf),
+			expected_flow_seq[flow]);
		t->err = true;
		rte_smp_wmb();
	}
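rte_smp_wmb() is a store (write) barrier; here it presumably ensures the t->err store becomes visible to the other lcores promptly, so the producer can observe the flag and stop. A sketch of that producer side, assuming the enqueue retry loop shown earlier polls the flag (its body is elided in this excerpt):

	/* Sketch (assumed loop body): stop producing once a worker
	 * has flagged a sequence mismatch. */
	while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
		if (t->err)
			break;
		rte_pause();
	}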