+	return 0;
+}
+
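+/*
+ * Burst-mode producer loop: fills a burst of elements from the mempool and
+ * enqueues them with rte_event_enqueue_burst() instead of one event at a time.
+ */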
+static inline int
+perf_producer_burst(void *arg)
+{
+	uint32_t i;
+	uint64_t timestamp;
+	struct rte_event_dev_info dev_info;
+	struct prod_data *p = arg;
+	struct test_perf *t = p->t;
+	struct evt_options *opt = t->opt;
+	const uint8_t dev_id = p->dev_id;
+	const uint8_t port = p->port_id;
+	struct rte_mempool *pool = t->pool;
+	const uint64_t nb_pkts = t->nb_pkts;
+	const uint32_t nb_flows = t->nb_flows;
+	uint32_t flow_counter = 0;
+	uint16_t enq = 0;
+	uint64_t count = 0;
+	struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
+	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
+	uint32_t burst_size = opt->prod_enq_burst_sz;
+
+	memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
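+	/* Cap the requested burst size at the port's maximum enqueue depth. */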
+	rte_event_dev_info_get(dev_id, &dev_info);
+	if (dev_info.max_event_port_enqueue_depth < burst_size)
+		burst_size = dev_info.max_event_port_enqueue_depth;
+
+	if (opt->verbose_level > 1)
+		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+				rte_lcore_id(), dev_id, port, p->queue_id);
+
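+	/* Pre-fill the event fields that stay constant across bursts. */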
+	for (i = 0; i < burst_size; i++) {
+		ev[i].op = RTE_EVENT_OP_NEW;
+		ev[i].queue_id = p->queue_id;
+		ev[i].sched_type = opt->sched_type_list[0];
+		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+		ev[i].event_type = RTE_EVENT_TYPE_CPU;
+		ev[i].sub_event_type = 0; /* stage 0 */
+	}
+
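+	/* Produce until nb_pkts events are enqueued or the test is stopped. */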
+	while (count < nb_pkts && t->done == false) {
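+		/* Retry on the next iteration if the mempool is exhausted. */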
+		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
+			continue;
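+		/* Stamp every element in the burst with one timestamp sample. */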
+		timestamp = rte_get_timer_cycles();
+		for (i = 0; i < burst_size; i++) {
+			ev[i].flow_id = flow_counter++ % nb_flows;
+			ev[i].event_ptr = m[i];
+			m[i]->timestamp = timestamp;
+		}
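+		/* Enqueue the burst, retrying until the port accepts it all. */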
+		enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
+		while (enq < burst_size) {
+			enq += rte_event_enqueue_burst(dev_id, port,
+					ev + enq,
+					burst_size - enq);