/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}
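
/*
 * Worked example: with two producers and --stage_cnt=3, six event queues
 * are created. Producer p feeds queue p * nb_stages (nb_stages is the
 * stride passed to perf_event_dev_port_setup() below), and each event then
 * hops through the remaining stages of that pipeline via fwd_event().
 */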

static inline __attribute__((always_inline)) void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	/* only stamp events entering the first queue of a pipeline */
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}
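
/*
 * The timestamp taken above is read back at the last pipeline stage,
 * where perf_process_last_stage_latency() (test_perf_common) subtracts
 * it from the then-current timer cycles to accumulate forward latency.
 */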

static inline __attribute__((always_inline)) void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	/* move the event to the next queue in the pipeline */
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}
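
/*
 * Example: with --stlist=aop (atomic, ordered, parallel), an event
 * forwarded into queue 1 of a pipeline runs ordered and one forwarded
 * into queue 2 runs parallel; stage 0 (atomic) is set when the event
 * is first injected.
 */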

static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		/* first q in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency && !prod_timer_type)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			/* retry until the scheduler accepts the event */
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}
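
/*
 * Burst variant of the worker: amortizes dequeue/enqueue overhead across
 * up to BURST_SIZE events and, in forward-latency mode, prefetches the
 * next event's payload while the current one is processed.
 */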
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 so the ev[i + 1] prefetch below never reads past the array */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				/* the event terminates here; release it */
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		/* the enqueue may be partial; retry the remainder */
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}
	return 0;
}
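
/*
 * Both burst capability (a device property) and fwd_latency (a test
 * option) are fixed for the whole run, so the dispatch below passes them
 * as literals and lets the compiler specialize each worker loop.
 */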
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	struct rte_event_dev_info dev_info;

	/* CPU producers need their own event ports; adapter-based ones do not */
	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = dev_info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth,
	};
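
	/*
	 * The device is configured to its advertised maxima (events limit,
	 * per-port dequeue/enqueue depths) so that the scheduler itself,
	 * not an artificially small configuration, is what gets measured.
	 */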

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage;
			 * see the worked example after this loop.
			 */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	/* clamp the worker dequeue depth to what the device supports */
	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};
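
	/*
	 * enqueue_depth tracks the device's dequeue-depth limit here; a
	 * worker never forwards more events than it dequeued in one burst,
	 * so the dequeue limit also bounds its enqueue bursts.
	 */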

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
			nb_queues, &p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		/* centralized schedulers need a service core to run */
		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check = perf_queue_capability_check,
	.opt_check = perf_queue_opt_check,
	.opt_dump = perf_queue_opt_dump,
	.test_setup = perf_test_setup,
	.mempool_setup = perf_mempool_setup,
	.ethdev_setup = perf_ethdev_setup,
	.eventdev_setup = perf_queue_eventdev_setup,
	.launch_lcores = perf_queue_launch_lcores,
	.eventdev_destroy = perf_eventdev_destroy,
	.mempool_destroy = perf_mempool_destroy,
	.ethdev_destroy = perf_ethdev_destroy,
	.test_result = perf_test_result,
	.test_destroy = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);