}
}
+/*
+ * Flush callback for rte_event_port_quiesce(): returns the flushed
+ * event's backing object to the mempool passed through @args.
+ *
+ * Fix: @args was annotated __rte_unused but is in fact used below --
+ * the annotation was false and is removed.
+ *
+ * NOTE(review): the quiesce caller in this hunk passes NULL as @args,
+ * which would hand a NULL mempool to rte_mempool_put() -- verify a
+ * valid pool (or a NULL guard here) is intended.
+ */
+static void
+event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
+/*
+ * Release the events a worker thread still holds when it stops.
+ *
+ * @nb_enq / @nb_deq are the counts from the worker's last enqueue and
+ * dequeue bursts: events[nb_enq..nb_deq-1] were dequeued but never
+ * enqueued back -- TODO confirm this caller contract.
+ */
+static inline void
+worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
+ uint16_t nb_enq, uint16_t nb_deq)
+{
+ int i;
+
+ /* All dequeued events were enqueued back: nothing pending.
+  * NOTE(review): this early return also skips the
+  * rte_event_port_quiesce() call below -- confirm the port has
+  * nothing left to drain in this case.
+  */
+ if (!(nb_deq - nb_enq))
+ return;
+
+ if (nb_deq) {
+ /* Free the mbufs of events that were dequeued but not
+  * enqueued back; RELEASE ops are skipped (no mbuf freed).
+  */
+ for (i = nb_enq; i < nb_deq; i++) {
+ if (events[i].op == RTE_EVENT_OP_RELEASE)
+ continue;
+ rte_pktmbuf_free(events[i].mbuf);
+ }
+
+ /* Turn every dequeued slot into a RELEASE op and hand the
+  * whole burst back to the port. NOTE(review): the enqueue
+  * return value is ignored -- presumably best-effort on this
+  * shutdown path; confirm a partial enqueue is acceptable.
+  */
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
+ }
+
+ /* Drain events still buffered inside the port via event_port_flush.
+  * NOTE(review): NULL is passed as the callback's mempool argument
+  * (it reaches rte_mempool_put()) -- verify this is safe here.
+  */
+ rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
+}
+
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);