net/ena: fix build with GCC 12
[dpdk.git] / app / test-eventdev / test_pipeline_queue.c
index 7da89da..e989396 100644 (file)
@@ -15,28 +15,33 @@ pipeline_queue_nb_event_queues(struct evt_options *opt)
        return (eth_count * opt->nb_stages) + eth_count;
 }
 
/* Common entry-point signature shared by every pipeline_queue_worker_*
 * routine below, so worker_wrapper() can pick one from a lookup table
 * indexed by [vector][burst][internal_port]. */
typedef int (*pipeline_queue_worker_t)(void *arg);
+
 static __rte_noinline int
 pipeline_queue_worker_single_stage_tx(void *arg)
 {
        PIPELINE_WORKER_SINGLE_STAGE_INIT;
+       uint8_t enq = 0, deq = 0;
 
        while (t->done == false) {
-               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+               deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
 
-               if (!event) {
+               if (!deq) {
                        rte_pause();
                        continue;
                }
 
                if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
-                       pipeline_event_tx(dev, port, &ev);
+                       enq = pipeline_event_tx(dev, port, &ev, t);
+                       ev.op = RTE_EVENT_OP_RELEASE;
                        w->processed_pkts++;
                } else {
                        ev.queue_id++;
                        pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
-                       pipeline_event_enqueue(dev, port, &ev);
+                       enq = pipeline_event_enqueue(dev, port, &ev, t);
                }
        }
+       pipeline_worker_cleanup(dev, port, &ev, enq, deq);
 
        return 0;
 }
@@ -46,11 +51,12 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
 {
        PIPELINE_WORKER_SINGLE_STAGE_INIT;
        const uint8_t *tx_queue = t->tx_evqueue_id;
+       uint8_t enq = 0, deq = 0;
 
        while (t->done == false) {
-               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+               deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
 
-               if (!event) {
+               if (!deq) {
                        rte_pause();
                        continue;
                }
@@ -58,9 +64,10 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
                ev.queue_id = tx_queue[ev.mbuf->port];
                rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
                pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
-               pipeline_event_enqueue(dev, port, &ev);
+               enq = pipeline_event_enqueue(dev, port, &ev, t);
                w->processed_pkts++;
        }
+       pipeline_worker_cleanup(dev, port, &ev, enq, deq);
 
        return 0;
 }
@@ -69,10 +76,10 @@ static __rte_noinline int
 pipeline_queue_worker_single_stage_burst_tx(void *arg)
 {
        PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+       uint16_t nb_rx = 0, nb_tx = 0;
 
        while (t->done == false) {
-               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
-                               BURST_SIZE, 0);
+               nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
 
                if (!nb_rx) {
                        rte_pause();
@@ -82,7 +89,7 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
                for (i = 0; i < nb_rx; i++) {
                        rte_prefetch0(ev[i + 1].mbuf);
                        if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
-                               pipeline_event_tx(dev, port, &ev[i]);
+                               pipeline_event_tx(dev, port, &ev[i], t);
                                ev[i].op = RTE_EVENT_OP_RELEASE;
                                w->processed_pkts++;
                        } else {
@@ -91,9 +98,9 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
                                                RTE_SCHED_TYPE_ATOMIC);
                        }
                }
-
-               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+               nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
        }
+       pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
 
        return 0;
 }
@@ -103,10 +110,10 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
 {
        PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
        const uint8_t *tx_queue = t->tx_evqueue_id;
+       uint16_t nb_rx = 0, nb_tx = 0;
 
        while (t->done == false) {
-               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
-                               BURST_SIZE, 0);
+               nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
 
                if (!nb_rx) {
                        rte_pause();
@@ -120,24 +127,152 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
                        pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
                }
 
-               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+               nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
                w->processed_pkts += nb_rx;
        }
+       pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
 
        return 0;
 }
 
/*
 * Single-stage worker, event-vector mode, device with an internal TX port:
 * dequeue one vector event at a time and transmit it directly.
 * PIPELINE_WORKER_SINGLE_STAGE_INIT presumably declares the locals used
 * below (t, w, dev, port, ev) -- defined in the test's common header;
 * confirm there.
 */
static __rte_noinline int
pipeline_queue_worker_single_stage_tx_vector(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;
	uint8_t enq = 0, deq = 0;	/* last enq/deq results, passed to cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			/* Nothing dequeued: back off briefly and retry. */
			rte_pause();
			continue;
		}

		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
			/* Snapshot nb_elem before TX -- sibling workers use the
			 * same ordering, presumably because TX may consume/free
			 * the vector; verify against pipeline_event_tx_vector().
			 */
			vector_sz = ev.vec->nb_elem;
			enq = pipeline_event_tx_vector(dev, port, &ev, t);
			/* Mark the held event as releasable for the final
			 * pipeline_worker_cleanup() pass. */
			ev.op = RTE_EVENT_OP_RELEASE;
			w->processed_pkts += vector_sz;
		} else {
			/* Not yet atomic: forward to the next queue so TX
			 * happens in atomic context. */
			ev.queue_id++;
			pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
			enq = pipeline_event_enqueue(dev, port, &ev, t);
		}
	}
	/* Drain/release any event still held by this port on exit. */
	pipeline_worker_cleanup(dev, port, &ev, enq, deq);

	return 0;
}
+
/*
 * Single-stage worker, event-vector mode, no internal TX port: forward
 * each vector to the TX adapter's event queue for its ethdev port
 * (t->tx_evqueue_id is indexed by the vector's source port).
 */
static __rte_noinline int
pipeline_queue_worker_single_stage_fwd_vector(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;
	uint8_t enq = 0, deq = 0;	/* last enq/deq results, passed to cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

		/* Route to the TX adapter queue for this vector's port. */
		ev.queue_id = tx_queue[ev.vec->port];
		/* Force ethdev TX queue 0 for the whole vector. */
		ev.vec->queue = 0;
		/* Snapshot before enqueue; the event is handed off below. */
		vector_sz = ev.vec->nb_elem;
		pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
		enq = pipeline_event_enqueue(dev, port, &ev, t);
		w->processed_pkts += vector_sz;
	}
	pipeline_worker_cleanup(dev, port, &ev, enq, deq);

	return 0;
}
+
/*
 * Single-stage worker, event-vector + burst mode, internal TX port:
 * dequeue up to BURST_SIZE vector events, transmit the atomic ones
 * directly and forward the rest back for an atomic pass.
 */
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
	uint16_t nb_rx = 0, nb_tx = 0;	/* last burst sizes, for cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
				/* Snapshot nb_elem before TX (see note in the
				 * non-burst variant). */
				vector_sz = ev[i].vec->nb_elem;
				pipeline_event_tx_vector(dev, port, &ev[i], t);
				/* RELEASE so the enqueue burst below drops the
				 * already-transmitted slot. */
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts += vector_sz;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event_vector(
					&ev[i], RTE_SCHED_TYPE_ATOMIC);
			}
		}

		/* Enqueue forwards and releases in one burst. */
		nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
	}
	pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);

	return 0;
}
+
/*
 * Single-stage worker, event-vector + burst mode, no internal TX port:
 * retarget every dequeued vector at the TX adapter queue for its port
 * and enqueue the whole burst.
 */
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_fwd_vector(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;
	uint16_t nb_rx = 0, nb_tx = 0;	/* last burst sizes, for cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		/* Accumulate packet count across the burst; credited once
		 * after the enqueue below. */
		vector_sz = 0;
		for (i = 0; i < nb_rx; i++) {
			ev[i].queue_id = tx_queue[ev[i].vec->port];
			/* Force ethdev TX queue 0 for each vector. */
			ev[i].vec->queue = 0;
			vector_sz += ev[i].vec->nb_elem;
			pipeline_fwd_event_vector(&ev[i],
						  RTE_SCHED_TYPE_ATOMIC);
		}

		nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
		w->processed_pkts += vector_sz;
	}
	pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);

	return 0;
}
 
 static __rte_noinline int
 pipeline_queue_worker_multi_stage_tx(void *arg)
 {
        PIPELINE_WORKER_MULTI_STAGE_INIT;
        const uint8_t *tx_queue = t->tx_evqueue_id;
+       uint8_t enq = 0, deq = 0;
 
        while (t->done == false) {
-               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+               deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
 
-               if (!event) {
+               if (!deq) {
                        rte_pause();
                        continue;
                }
@@ -145,7 +280,8 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
                cq_id = ev.queue_id % nb_stages;
 
                if (ev.queue_id == tx_queue[ev.mbuf->port]) {
-                       pipeline_event_tx(dev, port, &ev);
+                       enq = pipeline_event_tx(dev, port, &ev, t);
+                       ev.op = RTE_EVENT_OP_RELEASE;
                        w->processed_pkts++;
                        continue;
                }
@@ -154,8 +290,9 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
                pipeline_fwd_event(&ev, cq_id != last_queue ?
                                sched_type_list[cq_id] :
                                RTE_SCHED_TYPE_ATOMIC);
-               pipeline_event_enqueue(dev, port, &ev);
+               enq = pipeline_event_enqueue(dev, port, &ev, t);
        }
+       pipeline_worker_cleanup(dev, port, &ev, enq, deq);
 
        return 0;
 }
@@ -165,11 +302,12 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
 {
        PIPELINE_WORKER_MULTI_STAGE_INIT;
        const uint8_t *tx_queue = t->tx_evqueue_id;
+       uint8_t enq = 0, deq = 0;
 
        while (t->done == false) {
-               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+               deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
 
-               if (!event) {
+               if (!deq) {
                        rte_pause();
                        continue;
                }
@@ -180,14 +318,15 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
                        ev.queue_id = tx_queue[ev.mbuf->port];
                        rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
                        pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+                       enq = pipeline_event_enqueue(dev, port, &ev, t);
                        w->processed_pkts++;
                } else {
                        ev.queue_id++;
                        pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+                       enq = pipeline_event_enqueue(dev, port, &ev, t);
                }
-
-               pipeline_event_enqueue(dev, port, &ev);
        }
+       pipeline_worker_cleanup(dev, port, &ev, enq, deq);
 
        return 0;
 }
@@ -197,10 +336,10 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
 {
        PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
        const uint8_t *tx_queue = t->tx_evqueue_id;
+       uint16_t nb_rx = 0, nb_tx = 0;
 
        while (t->done == false) {
-               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
-                               BURST_SIZE, 0);
+               nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
 
                if (!nb_rx) {
                        rte_pause();
@@ -212,7 +351,7 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
                        cq_id = ev[i].queue_id % nb_stages;
 
                        if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
-                               pipeline_event_tx(dev, port, &ev[i]);
+                               pipeline_event_tx(dev, port, &ev[i], t);
                                ev[i].op = RTE_EVENT_OP_RELEASE;
                                w->processed_pkts++;
                                continue;
@@ -223,9 +362,9 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
                                        sched_type_list[cq_id] :
                                        RTE_SCHED_TYPE_ATOMIC);
                }
-
-               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+               nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
        }
+       pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
 
        return 0;
 }
@@ -235,10 +374,11 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
 {
        PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
        const uint8_t *tx_queue = t->tx_evqueue_id;
+       uint16_t nb_rx = 0, nb_tx = 0;
 
        while (t->done == false) {
-               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
-                               BURST_SIZE, 0);
+               uint16_t processed_pkts = 0;
+               nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
 
                if (!nb_rx) {
                        rte_pause();
@@ -254,7 +394,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
                                rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
                                pipeline_fwd_event(&ev[i],
                                                RTE_SCHED_TYPE_ATOMIC);
-                               w->processed_pkts++;
+                               processed_pkts++;
                        } else {
                                ev[i].queue_id++;
                                pipeline_fwd_event(&ev[i],
@@ -262,8 +402,162 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
                        }
                }
 
-               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+               nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
+               w->processed_pkts += processed_pkts;
+       }
+       pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
+       return 0;
+}
+
/*
 * Multi-stage worker, event-vector mode, internal TX port: events cycle
 * through nb_stages queues; once an event reaches the TX adapter queue
 * for its port it is transmitted, otherwise it advances one stage.
 * PIPELINE_WORKER_MULTI_STAGE_INIT presumably declares t, w, dev, port,
 * ev, cq_id, nb_stages, last_queue and sched_type_list -- confirm in the
 * test's common header.
 */
static __rte_noinline int
pipeline_queue_worker_multi_stage_tx_vector(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;
	uint8_t enq = 0, deq = 0;	/* last enq/deq results, for cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

		/* Stage index of the queue this event came from. */
		cq_id = ev.queue_id % nb_stages;

		if (ev.queue_id == tx_queue[ev.vec->port]) {
			/* Final stage: transmit and mark releasable. */
			vector_sz = ev.vec->nb_elem;
			enq = pipeline_event_tx_vector(dev, port, &ev, t);
			w->processed_pkts += vector_sz;
			ev.op = RTE_EVENT_OP_RELEASE;
			continue;
		}

		/* Advance one stage; the last hop is forced atomic so the
		 * subsequent TX happens in atomic context. */
		ev.queue_id++;
		pipeline_fwd_event_vector(&ev, cq_id != last_queue
					       ? sched_type_list[cq_id]
					       : RTE_SCHED_TYPE_ATOMIC);
		enq = pipeline_event_enqueue(dev, port, &ev, t);
	}
	pipeline_worker_cleanup(dev, port, &ev, enq, deq);

	return 0;
}
+
/*
 * Multi-stage worker, event-vector mode, no internal TX port: after the
 * last processing stage the vector is forwarded to the TX adapter queue
 * for its port; earlier stages just advance one queue.
 */
static __rte_noinline int
pipeline_queue_worker_multi_stage_fwd_vector(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;
	uint8_t enq = 0, deq = 0;	/* last enq/deq results, for cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

		/* Stage index of the queue this event came from. */
		cq_id = ev.queue_id % nb_stages;

		if (cq_id == last_queue) {
			/* Last stage: hand off to the TX adapter queue. */
			vector_sz = ev.vec->nb_elem;
			ev.queue_id = tx_queue[ev.vec->port];
			pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
			w->processed_pkts += vector_sz;
		} else {
			ev.queue_id++;
			pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
		}

		/* Single enqueue path for both branches. */
		enq = pipeline_event_enqueue(dev, port, &ev, t);
	}
	pipeline_worker_cleanup(dev, port, &ev, enq, deq);

	return 0;
}
+
/*
 * Multi-stage worker, event-vector + burst mode, internal TX port:
 * burst-dequeue, transmit events that reached their port's TX adapter
 * queue, advance the rest one stage, then burst-enqueue.
 */
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;
	uint16_t nb_rx = 0, nb_tx = 0;	/* last burst sizes, for cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			/* Stage index of the queue this event came from. */
			cq_id = ev[i].queue_id % nb_stages;

			if (ev[i].queue_id == tx_queue[ev[i].vec->port]) {
				/* Final stage: transmit, then mark RELEASE so
				 * the burst enqueue drops this slot. */
				vector_sz = ev[i].vec->nb_elem;
				pipeline_event_tx_vector(dev, port, &ev[i], t);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts += vector_sz;
				continue;
			}

			/* Advance one stage; last hop forced atomic. */
			ev[i].queue_id++;
			pipeline_fwd_event_vector(
				&ev[i], cq_id != last_queue
						? sched_type_list[cq_id]
						: RTE_SCHED_TYPE_ATOMIC);
		}

		nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
	}
	pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);

	return 0;
}
+
/*
 * Multi-stage worker, event-vector + burst mode, no internal TX port:
 * last-stage vectors are redirected to their port's TX adapter queue,
 * others advance one stage; the whole burst is enqueued in one call.
 */
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_fwd_vector(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;
	uint16_t nb_rx = 0, nb_tx = 0;	/* last burst sizes, for cleanup */
	uint16_t vector_sz;

	while (!t->done) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			/* Stage index of the queue this event came from. */
			cq_id = ev[i].queue_id % nb_stages;

			if (cq_id == last_queue) {
				/* Last stage: route to TX adapter queue.
				 * NOTE(review): packets are credited here,
				 * before the enqueue below -- matches the
				 * sibling fwd workers. */
				ev[i].queue_id = tx_queue[ev[i].vec->port];
				vector_sz = ev[i].vec->nb_elem;
				pipeline_fwd_event_vector(
					&ev[i], RTE_SCHED_TYPE_ATOMIC);
				w->processed_pkts += vector_sz;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event_vector(
					&ev[i], sched_type_list[cq_id]);
			}
		}

		nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
	}
	pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);

	return 0;
}
@@ -276,29 +570,39 @@ worker_wrapper(void *arg)
        const bool burst = evt_has_burst_mode(w->dev_id);
        const bool internal_port = w->t->internal_port;
        const uint8_t nb_stages = opt->nb_stages;
-       RTE_SET_USED(opt);
-
-       if (nb_stages == 1) {
-               if (!burst && internal_port)
-                       return pipeline_queue_worker_single_stage_tx(arg);
-               else if (!burst && !internal_port)
-                       return pipeline_queue_worker_single_stage_fwd(arg);
-               else if (burst && internal_port)
-                       return pipeline_queue_worker_single_stage_burst_tx(arg);
-               else if (burst && !internal_port)
-                       return pipeline_queue_worker_single_stage_burst_fwd(
-                                       arg);
-       } else {
-               if (!burst && internal_port)
-                       return pipeline_queue_worker_multi_stage_tx(arg);
-               else if (!burst && !internal_port)
-                       return pipeline_queue_worker_multi_stage_fwd(arg);
-               else if (burst && internal_port)
-                       return pipeline_queue_worker_multi_stage_burst_tx(arg);
-               else if (burst && !internal_port)
-                       return pipeline_queue_worker_multi_stage_burst_fwd(arg);
+       /*vector/burst/internal_port*/
+       const pipeline_queue_worker_t
+       pipeline_queue_worker_single_stage[2][2][2] = {
+               [0][0][0] = pipeline_queue_worker_single_stage_fwd,
+               [0][0][1] = pipeline_queue_worker_single_stage_tx,
+               [0][1][0] = pipeline_queue_worker_single_stage_burst_fwd,
+               [0][1][1] = pipeline_queue_worker_single_stage_burst_tx,
+               [1][0][0] = pipeline_queue_worker_single_stage_fwd_vector,
+               [1][0][1] = pipeline_queue_worker_single_stage_tx_vector,
+               [1][1][0] = pipeline_queue_worker_single_stage_burst_fwd_vector,
+               [1][1][1] = pipeline_queue_worker_single_stage_burst_tx_vector,
+       };
+       const pipeline_queue_worker_t
+       pipeline_queue_worker_multi_stage[2][2][2] = {
+               [0][0][0] = pipeline_queue_worker_multi_stage_fwd,
+               [0][0][1] = pipeline_queue_worker_multi_stage_tx,
+               [0][1][0] = pipeline_queue_worker_multi_stage_burst_fwd,
+               [0][1][1] = pipeline_queue_worker_multi_stage_burst_tx,
+               [1][0][0] = pipeline_queue_worker_multi_stage_fwd_vector,
+               [1][0][1] = pipeline_queue_worker_multi_stage_tx_vector,
+               [1][1][0] = pipeline_queue_worker_multi_stage_burst_fwd_vector,
+               [1][1][1] = pipeline_queue_worker_multi_stage_burst_tx_vector,
+       };
+
+       if (nb_stages == 1)
+               return (pipeline_queue_worker_single_stage[opt->ena_vector]
+                                                         [burst]
+                                                         [internal_port])(arg);
+       else
+               return (pipeline_queue_worker_multi_stage[opt->ena_vector]
+                                                        [burst]
+                                                        [internal_port])(arg);
 
-       }
        rte_panic("invalid worker\n");
 }
 
@@ -334,17 +638,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
        memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
 
        rte_event_dev_info_get(opt->dev_id, &info);
-       const struct rte_event_dev_config config = {
-                       .nb_event_queues = nb_queues,
-                       .nb_event_ports = nb_ports,
-                       .nb_events_limit  = info.max_num_events,
-                       .nb_event_queue_flows = opt->nb_flows,
-                       .nb_event_port_dequeue_depth =
-                               info.max_event_port_dequeue_depth,
-                       .nb_event_port_enqueue_depth =
-                               info.max_event_port_enqueue_depth,
-       };
-       ret = rte_event_dev_configure(opt->dev_id, &config);
+       ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
@@ -533,6 +827,7 @@ static const struct evt_test_ops pipeline_queue =  {
        .ethdev_setup       = pipeline_ethdev_setup,
        .eventdev_setup     = pipeline_queue_eventdev_setup,
        .launch_lcores      = pipeline_queue_launch_lcores,
+       .ethdev_rx_stop     = pipeline_ethdev_rx_stop,
        .eventdev_destroy   = pipeline_eventdev_destroy,
        .mempool_destroy    = pipeline_mempool_destroy,
        .ethdev_destroy     = pipeline_ethdev_destroy,