/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
}

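/*
 * At the first queue of the pipeline, record the current timer cycles in the
 * event payload (struct perf_elt) so that the last stage can compute the
 * end-to-end forwarding latency.
 */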
static inline __attribute__((always_inline)) void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

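/*
 * Prepare an event for the next pipeline stage: move it to the next event
 * queue, pick the scheduling type configured for that stage and mark it as a
 * CPU-originated FORWARD operation.
 */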
static inline __attribute__((always_inline)) void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

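/*
 * Non-burst worker: dequeue a single event at a time, account it at the last
 * pipeline stage (with or without latency measurement) or forward it to the
 * next queue.
 */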
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}
		if (enable_fwd_latency)
			/* first q in pipeline, mark timestamp to compute fwd latency */
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

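/*
 * Burst worker: same pipeline handling as perf_queue_worker(), but dequeues
 * and enqueues up to BURST_SIZE events per call to amortize per-event driver
 * overhead.
 */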
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}
		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}
		uint16_t enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

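/*
 * Resolve the worker variant once at launch time so that the per-event
 * branches on burst capability and fwd_latency are compiled out of the
 * fast path.
 */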
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

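/*
 * Configure the event device with one queue per producer per stage, apply the
 * per-stage scheduling type (and optional stage priority) to each queue, set
 * up the ports and start the device.
 */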
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;

	const struct rte_event_dev_config config = {
			.nb_event_queues = perf_queue_nb_event_queues(opt),
			.nb_event_ports = perf_nb_event_ports(opt),
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
		q_conf.event_queue_cfg = evt_sched_type2queue_cfg
				(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues(stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
				perf_queue_nb_event_queues(opt));
	if (ret)
		return ret;

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

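/*
 * Check that the event device exposes enough event queues and ports for the
 * requested number of producers and stages.
 */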
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

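/* Operations for the "perf_queue" test, registered with the framework below. */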
static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);