The order test stored the flow ID in the deprecated mbuf field udata64.
It is moved to a dynamic field in order to allow removal of udata64.
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
}
if (!flow_id_cap)
- ev.flow_id = ev.mbuf->udata64;
+ order_flow_id_copy_from_mbuf(t, &ev);
if (ev.sub_event_type == 0) { /* stage 0 from producer */
order_atq_process_stage_0(&ev);
for (i = 0; i < nb_rx; i++) {
if (!flow_id_cap)
- ev[i].flow_id = ev[i].mbuf->udata64;
+ order_flow_id_copy_from_mbuf(t, &ev[i]);
if (ev[i].sub_event_type == 0) { /*stage 0 */
order_atq_process_stage_0(&ev[i]);
if (m == NULL)
continue;
- const uint32_t flow = (uintptr_t)m % nb_flows;
+ const flow_id_t flow = (uintptr_t)m % nb_flows;
/* Maintain seq number per flow */
m->seqn = producer_flow_seq[flow]++;
- m->udata64 = flow;
-
- ev.flow_id = flow;
- ev.mbuf = m;
+ order_flow_id_save(t, flow, m, &ev);
while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
if (t->err)
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
void *test_order;
+ struct test_order *t;
+ static const struct rte_mbuf_dynfield flow_id_dynfield_desc = {
+ .name = "test_event_dynfield_flow_id",
+ .size = sizeof(flow_id_t),
+ .align = __alignof__(flow_id_t),
+ };
test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
RTE_CACHE_LINE_SIZE, opt->socket_id);
goto nomem;
}
test->test_priv = test_order;
+ t = evt_test_priv(test);
- struct test_order *t = evt_test_priv(test);
+ t->flow_id_dynfield_offset =
+ rte_mbuf_dynfield_register(&flow_id_dynfield_desc);
+ if (t->flow_id_dynfield_offset < 0) {
+ evt_err("failed to register mbuf field");
+ return -rte_errno;
+ }
t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
sizeof(*t->producer_flow_seq) * opt->nb_flows,
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
#include "evt_common.h"
#include "evt_options.h"
#define BURST_SIZE 16
+typedef uint32_t flow_id_t;
+
struct test_order;
struct worker_data {
uint32_t nb_flows;
uint64_t nb_pkts;
struct rte_mempool *pool;
+ int flow_id_dynfield_offset;
struct prod_data prod;
struct worker_data worker[EVT_MAX_PORTS];
uint32_t *producer_flow_seq;
struct evt_options *opt;
} __rte_cache_aligned;
+/* Restore the event's flow ID from the mbuf dynamic field (registered in
+ * order_test_setup at t->flow_id_dynfield_offset). Used on the worker RX
+ * path when the event device lacks flow-ID capability (flow_id_cap == 0),
+ * replacing the former read of the deprecated mbuf->udata64.
+ */
+static inline void
+order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
+{
+	event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf,
+			t->flow_id_dynfield_offset, flow_id_t *);
+}
+
+/* Producer-side counterpart of order_flow_id_copy_from_mbuf(): store the
+ * flow ID in the mbuf dynamic field and fill in the event's flow_id and
+ * mbuf members, replacing the former write to the deprecated mbuf->udata64.
+ */
+static inline void
+order_flow_id_save(struct test_order *t, flow_id_t flow_id,
+		struct rte_mbuf *mbuf, struct rte_event *event)
+{
+	*RTE_MBUF_DYNFIELD(mbuf,
+			t->flow_id_dynfield_offset, flow_id_t *) = flow_id;
+	event->flow_id = flow_id;
+	event->mbuf = mbuf;
+}
+
static inline int
order_nb_event_ports(struct evt_options *opt)
{
}
if (!flow_id_cap)
- ev.flow_id = ev.mbuf->udata64;
+ order_flow_id_copy_from_mbuf(t, &ev);
if (ev.queue_id == 0) { /* from ordered queue */
order_queue_process_stage_0(&ev);
for (i = 0; i < nb_rx; i++) {
if (!flow_id_cap)
- ev[i].flow_id = ev[i].mbuf->udata64;
+ order_flow_id_copy_from_mbuf(t, &ev[i]);
if (ev[i].queue_id == 0) { /* from ordered queue */
order_queue_process_stage_0(&ev[i]);