/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
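/*
 * Illustrative invocation (a sketch only; option names are taken from the
 * testeventdev guide linked above, the lcore layout and counts are just
 * example values):
 *
 *   dpdk-test-eventdev -l 0-3 -- --test=perf_queue --plcores=1 \
 *           --wlcores=2,3 --stlist=a,a --nb_flows=64 --nb_pkts=10000000
 */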
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
}
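/*
 * For example, two producer lcores and three stages give six event queues.
 * Queues are laid out so that queue_id % nb_stages identifies the pipeline
 * stage an event is currently in; the workers below rely on that mapping.
 */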
static inline __attribute__((always_inline)) void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}
static inline __attribute__((always_inline)) void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	/* move the event to the next queue, i.e. the next pipeline stage */
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}
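/*
 * Per-event worker flow (single-event dequeue variant): dequeue one event,
 * timestamp it if it just entered stage 0 and forward latency measurement
 * is enabled, then either account it as completed (last stage) or forward
 * it to the next stage's queue.
 */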
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT; /* per-worker locals from test_perf_common.h */
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}
		if (enable_fwd_latency)
		/* first q in pipeline, mark timestamp to compute fwd latency */
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
						&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
						&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT; /* per-worker locals from test_perf_common.h */
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		/* enqueue the whole burst back; retry on partial enqueue */
		uint16_t enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}
	return 0;
}
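/*
 * Unlike the single-event worker above, last-stage events in the burst
 * variant are marked RTE_EVENT_OP_RELEASE and enqueued together with the
 * forwarded events, so every dequeued slot in ev[] is handed back to the
 * device in the enqueue loop.
 */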
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}
static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}
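/*
 * Device setup below follows the usual eventdev sequence: configure the
 * device, set up the event queues, set up the ports (one per worker, plus
 * one per producer unless the ethdev Rx adapter produces the events), map a
 * service lcore if the PMD has no internal scheduler, then start the device.
 */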
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	struct rte_event_dev_info dev_info;

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count() * nb_stages :
		perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = dev_info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}
	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues(stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}
	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}
static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}
static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}
static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);