/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
/* NOTE(review): diff hunk — replaces the raw GCC attribute spelling with
 * DPDK's portable __rte_always_inline macro (same semantics on GCC/Clang,
 * keeps ICC/MSVC builds working). No behavioral change intended. */
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_atq_process_stage_0(struct rte_event *const ev)
{
ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
}
static int
/* NOTE(review): diff hunk — adds a const bool flow_id_cap parameter. When
 * the event device lacks the flow-id capability, ev.flow_id is restored
 * from the mbuf via order_flow_id_copy_from_mbuf() right after dequeue,
 * before the stage check. Hunk context is elided mid-function: the
 * enclosing dequeue loop (and the branch owning the dangling `continue;`)
 * is not visible here — confirm against the full file before applying. */
-order_atq_worker(void *arg)
+order_atq_worker(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev;
continue;
}
+ if (!flow_id_cap)
+ order_flow_id_copy_from_mbuf(t, &ev);
+
if (ev.sub_event_type == 0) { /* stage 0 from producer */
order_atq_process_stage_0(&ev);
while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
static int
/* NOTE(review): diff hunk — burst variant of the same change: takes
 * flow_id_cap and, when the device cannot preserve flow ids, restores
 * ev[i].flow_id from the mbuf for every dequeued event before the
 * per-event stage dispatch. Hunk ends mid-loop (stage-1 branch body and
 * enqueue path elided) — review against the complete function. */
-order_atq_worker_burst(void *arg)
+order_atq_worker_burst(void *arg, const bool flow_id_cap)
{
ORDER_WORKER_INIT;
struct rte_event ev[BURST_SIZE];
}
for (i = 0; i < nb_rx; i++) {
+ if (!flow_id_cap)
+ order_flow_id_copy_from_mbuf(t, &ev[i]);
+
if (ev[i].sub_event_type == 0) { /*stage 0 */
order_atq_process_stage_0(&ev[i]);
} else if (ev[i].sub_event_type == 1) { /* stage 1 */
/* NOTE(review): diff hunk inside the worker dispatch wrapper — the
 * function header sits above this hunk and is not visible here. The patch
 * additionally probes evt_has_flow_id() and forwards the capability as a
 * constant true/false argument (four explicit call sites rather than one
 * variable argument — presumably so the const bool folds to a specialized
 * worker loop per capability; TODO confirm that intent with the author). */
{
struct worker_data *w = arg;
const bool burst = evt_has_burst_mode(w->dev_id);
-
- if (burst)
- return order_atq_worker_burst(arg);
- else
- return order_atq_worker(arg);
+ const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+ if (burst) {
+ if (flow_id_cap)
+ return order_atq_worker_burst(arg, true);
+ else
+ return order_atq_worker_burst(arg, false);
+ } else {
+ if (flow_id_cap)
+ return order_atq_worker(arg, true);
+ else
+ return order_atq_worker(arg, false);
+ }
}
static int