/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
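
/*
 * "All types queue" (ATQ) flavour of the perf test: all stages of the
 * pipeline share a single RTE_EVENT_QUEUE_CFG_ALL_TYPES queue per producer,
 * and ev.sub_event_type tracks the current stage, instead of using one
 * queue per stage.
 *
 * Illustrative invocation (options per the guide above; the vdev name is
 * an assumption, any eventdev with all-types queue support works):
 *
 *   dpdk-test-eventdev --vdev=event_octeontx -- --test=perf_atq \
 *       --plcores=2 --wlcores=3 --stlist=p --nb_pkts=0
 */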

static inline int
atq_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers */
	return evt_nr_active_lcores(opt->plcores);
}
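
/*
 * Producer p enqueues to queue p (ports are set up with stride 1 by
 * perf_event_dev_port_setup() further below), while worker ports are
 * linked to all queues; see test_perf_common.c for the port layout.
 */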

static inline __attribute__((always_inline)) void
atq_mark_fwd_latency(struct rte_event *const ev)
{
	/* stage 0: stamp the perf element with the current timer cycles so
	 * the last stage can compute the end-to-end forwarding latency.
	 */
	if (unlikely(ev->sub_event_type == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

static inline __attribute__((always_inline)) void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	/* advance the stage counter; the event stays on the same queue */
	ev->sub_event_type++;
	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}
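
/*
 * Unlike the queue based perf test, forwarding does not hop to another
 * queue: only sub_event_type (the stage counter) and sched_type change,
 * and the event is re-enqueued to the same all-types queue with
 * RTE_EVENT_OP_FORWARD.
 */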

static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		/* speculative prefetch; on an empty dequeue ev holds stale
		 * data, but prefetching a stale address is harmless.
		 */
		if (enable_fwd_latency)
			rte_prefetch0(ev.event_ptr);

		if (!event) {
			rte_pause();
			continue;
		}

		if (enable_fwd_latency)
			/* first stage in pipeline, mark ts to compute fwd latency */
			atq_mark_fwd_latency(&ev);

		/* last stage in pipeline */
		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, &ev, w,
					bufs, sz, cnt);
		} else {
			atq_fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}
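
/*
 * Burst counterpart of perf_atq_worker(): identical stage handling, but
 * dequeues and enqueues up to BURST_SIZE events per call to amortize the
 * per-event device access cost.
 */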

static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				atq_mark_fwd_latency(&ev[i]);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].sub_event_type % nb_stages)
						== laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
						nb_stages);
			}
		}

		/* enqueue all events, retrying until the burst is accepted */
		uint16_t enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}
	return 0;
}
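
/*
 * Pick one of the four specialized worker loops so that burst and
 * fwd_latency become compile-time constants inside the always-inlined
 * helpers and the per-event branches are optimized away.
 */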

static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_atq_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_atq_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_atq_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_atq_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	uint8_t queue;

	const struct rte_event_dev_config config = {
			.nb_event_queues = atq_nb_event_queues(opt),
			.nb_event_ports = perf_nb_event_ports(opt),
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < atq_nb_event_queues(opt); queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
			atq_nb_event_queues(opt));
	if (ret)
		return ret;

	ret = evt_service_setup(opt->dev_id);
	if (ret) {
		evt_err("No service lcore found to run event dev.");
		return ret;
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_atq_opt_dump(struct evt_options *opt)
{
	perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, atq_nb_event_queues(opt));
}

static bool
perf_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			atq_nb_event_queues(opt), dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}
	/* the ATQ test additionally needs all-types queue support */
	if (!evt_has_all_types_queue(opt->dev_id))
		return false;

	return true;
}
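
/*
 * Test ops: only the capability/option checks, queue setup and the worker
 * loops are ATQ specific; the remaining callbacks are shared with the
 * other perf test variants via test_perf_common.h.
 */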

static const struct evt_test_ops perf_atq = {
	.cap_check = perf_atq_capability_check,
	.opt_check = perf_atq_opt_check,
	.opt_dump = perf_atq_opt_dump,
	.test_setup = perf_test_setup,
	.mempool_setup = perf_mempool_setup,
	.eventdev_setup = perf_atq_eventdev_setup,
	.launch_lcores = perf_atq_launch_lcores,
	.eventdev_destroy = perf_eventdev_destroy,
	.mempool_destroy = perf_mempool_destroy,
	.test_result = perf_test_result,
	.test_destroy = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);