/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);

	return nb_prod * opt->nb_stages;
}
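
/*
 * Illustrative example: 2 producers with 3 stages yield 2 * 3 = 6 event
 * queues, one chain of 3 stage queues per producer (queues 0-2 and
 * queues 3-5); the workers walk each chain one queue at a time (see
 * fwd_event() below).
 */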

static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}
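
/*
 * Note: (queue_id % nb_stages) == 0 identifies the first stage of a
 * producer's queue chain; the timestamp stored in the perf_elt is read
 * back by perf_process_last_stage_latency() to compute the forwarding
 * latency.
 */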

static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}
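
/*
 * Illustrative walk-through: with nb_stages = 3 and a sched type list
 * of {ATOMIC, ORDERED, PARALLEL}, an event dequeued from queue 0 is
 * forwarded to queue 1 as ORDERED, then to queue 2 as PARALLEL, where
 * the last-stage check in the workers below consumes it.
 */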

static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	uint16_t enq = 0, deq = 0;
	struct rte_event ev;
	PERF_WORKER_INIT;

	while (t->done == false) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
		if (!deq) {
			rte_pause();
			continue;
		}

		if (prod_crypto_type &&
		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			struct rte_crypto_op *op = ev.event_ptr;

			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
				if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
					if (op->sym->m_dst == NULL)
						ev.event_ptr = op->sym->m_src;
					else
						ev.event_ptr = op->sym->m_dst;
					rte_crypto_op_free(op);
				}
			} else {
				rte_crypto_op_free(op);
				continue;
			}
		}

		if (enable_fwd_latency && !prod_timer_type)
			/* first q in pipeline, mark timestamp to compute fwd latency */
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			do {
				enq = rte_event_enqueue_burst(dev, port,
						&ev, 1);
			} while (!enq && !t->done);
		}
	}

	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

	return 0;
}

static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];
	uint16_t enq = 0, nb_rx = 0;
	uint16_t i;
	PERF_WORKER_INIT;

	while (t->done == false) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (prod_crypto_type &&
			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				struct rte_crypto_op *op = ev[i].event_ptr;

				if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
					if (op->sym->m_dst == NULL)
						ev[i].event_ptr = op->sym->m_src;
					else
						ev[i].event_ptr = op->sym->m_dst;
					rte_crypto_op_free(op);
				} else {
					rte_crypto_op_free(op);
					continue;
				}
			}

			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline;
				 * mark timestamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}

			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx && !t->done) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}

	perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

	return 0;
}
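
/*
 * The retry loop above re-offers the unsent tail of the burst
 * (ev + enq) when the port back-pressures, so forwarded events are not
 * silently dropped; the t->done check bounds the retries during test
 * shutdown.
 */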

static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}
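
/*
 * Dispatching on compile-time constants (0/1) lets the compiler
 * generate four specialized worker loops with the fwd-latency branches
 * resolved statically, which is what the "allow compiler to optimize"
 * comment above refers to.
 */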

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret, nb_ports, nb_queues;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = opt->nb_flows,
		.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues(stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}

		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}
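
	/*
	 * Worked example: with queue priority enabled and nb_stages = 3,
	 * step = 255 / 2 = 127, so stage 0 queues get priority 255
	 * (LOWEST), stage 1 queues 128 and stage 2 queues 1; queues
	 * nearer the last stage are scheduled first.
	 */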

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
		.dequeue_depth = opt->wkr_deq_dep,
		.enqueue_depth = dev_info.max_event_port_dequeue_depth,
		.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
			nb_queues, &p_conf);
	if (ret)
		return ret;
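
	/*
	 * nb_stages is passed as the stride so that producer i injects
	 * into queue i * nb_stages, i.e. stage 0 of its own queue chain;
	 * see perf_event_dev_port_setup() in test_perf_common.c for the
	 * port/queue linking details.
	 */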

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
						cdev_id);
				return ret;
			}
		}
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check = perf_queue_capability_check,
	.opt_check = perf_queue_opt_check,
	.opt_dump = perf_queue_opt_dump,
	.test_setup = perf_test_setup,
	.mempool_setup = perf_mempool_setup,
	.ethdev_setup = perf_ethdev_setup,
	.cryptodev_setup = perf_cryptodev_setup,
	.ethdev_rx_stop = perf_ethdev_rx_stop,
	.eventdev_setup = perf_queue_eventdev_setup,
	.launch_lcores = perf_queue_launch_lcores,
	.eventdev_destroy = perf_eventdev_destroy,
	.mempool_destroy = perf_mempool_destroy,
	.ethdev_destroy = perf_ethdev_destroy,
	.cryptodev_destroy = perf_cryptodev_destroy,
	.test_result = perf_test_result,
	.test_destroy = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);