app: use SPDX tag for Cavium copyright files
[dpdk.git] app/test-eventdev/test_perf_queue.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

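/*
 * Illustrative invocation (option names from evt_options; the exact binary
 * path depends on the build), e.g.:
 *   ./build/app/dpdk-test-eventdev -l 0-3 -- --test=perf_queue \
 *        --plcores=1 --wlcores=2,3 --stlist=a --nb_flows=64
 */
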
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
        /* nb_queues = number of producers * number of stages */
        return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
}
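
/*
 * Queue layout: ports are set up below with stride == nb_stages, so each
 * producer feeds its own chain of nb_stages queues and queue_id % nb_stages
 * is the pipeline stage of a queue. E.g. 2 producers * 3 stages = 6 queues:
 * q0-q2 for one producer, q3-q5 for the other.
 */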

static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
                const uint8_t nb_stages)
{
        if (unlikely((ev->queue_id % nb_stages) == 0)) {
                struct perf_elt *const m = ev->event_ptr;

                m->timestamp = rte_get_timer_cycles();
        }
}

static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
                const uint8_t nb_stages)
{
        ev->queue_id++;
        ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->event_type = RTE_EVENT_TYPE_CPU;
}

static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
        PERF_WORKER_INIT;
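        /* PERF_WORKER_INIT (test_perf_common.h) declares the locals used
         * below: w, t, dev, port, sched_type_list, pool, nb_stages,
         * laststage, cnt, bufs and sz.
         */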
        struct rte_event ev;

        while (t->done == false) {
                uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

                if (!event) {
                        rte_pause();
                        continue;
                }
                /* first queue in pipeline: mark timestamp to compute
                 * forward latency
                 */
                if (enable_fwd_latency)
                        mark_fwd_latency(&ev, nb_stages);

                /* last stage in pipeline */
                if (unlikely((ev.queue_id % nb_stages) == laststage)) {
                        if (enable_fwd_latency)
                                cnt = perf_process_last_stage_latency(pool,
                                        &ev, w, bufs, sz, cnt);
                        else
                                cnt = perf_process_last_stage(pool,
                                        &ev, w, bufs, sz, cnt);
                } else {
                        fwd_event(&ev, sched_type_list, nb_stages);
                        while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
                                rte_pause();
                }
        }
        return 0;
}
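
/*
 * Burst variant of perf_queue_worker(): amortizes dequeue/enqueue cost over
 * up to BURST_SIZE events per iteration and, in the latency path, prefetches
 * the next event's payload while the current one is processed.
 */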

static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
        PERF_WORKER_INIT;
        uint16_t i;
        /* +1 spare slot so the rte_prefetch0(ev[i + 1]) below never reads
         * past the end of the array
         */
        struct rte_event ev[BURST_SIZE + 1];

        while (t->done == false) {
                uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
                                BURST_SIZE, 0);

                if (!nb_rx) {
                        rte_pause();
                        continue;
                }

                for (i = 0; i < nb_rx; i++) {
                        if (enable_fwd_latency) {
                                rte_prefetch0(ev[i+1].event_ptr);
                                /* first queue in pipeline:
                                 * mark timestamp to compute fwd latency
                                 */
                                mark_fwd_latency(&ev[i], nb_stages);
                        }
                        /* last stage in pipeline */
                        if (unlikely((ev[i].queue_id % nb_stages) ==
                                                 laststage)) {
                                if (enable_fwd_latency)
                                        cnt = perf_process_last_stage_latency(
                                                pool, &ev[i], w, bufs, sz, cnt);
                                else
                                        cnt = perf_process_last_stage(pool,
                                                &ev[i], w, bufs, sz, cnt);

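                                /* Last-stage events are consumed here, so
                                 * mark them RELEASE: the enqueue below then
                                 * releases the scheduling context instead
                                 * of forwarding the event.
                                 */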
                                ev[i].op = RTE_EVENT_OP_RELEASE;
                        } else {
                                fwd_event(&ev[i], sched_type_list, nb_stages);
                        }
                }

                uint16_t enq;

                enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
                while (enq < nb_rx) {
                        enq += rte_event_enqueue_burst(dev, port,
                                                        ev + enq, nb_rx - enq);
                }
        }
        return 0;
}

static int
worker_wrapper(void *arg)
{
        struct worker_data *w = arg;
        struct evt_options *opt = w->t->opt;

        const bool burst = evt_has_burst_mode(w->dev_id);
        const int fwd_latency = opt->fwd_latency;

        /* allow compiler to optimize: each call below passes constants,
         * so the dead branches in the workers can be eliminated
         */
        if (!burst && !fwd_latency)
                return perf_queue_worker(arg, 0);
        else if (!burst && fwd_latency)
                return perf_queue_worker(arg, 1);
        else if (burst && !fwd_latency)
                return perf_queue_worker_burst(arg, 0);
        else if (burst && fwd_latency)
                return perf_queue_worker_burst(arg, 1);

        rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
        return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
        uint8_t queue;
        int nb_stages = opt->nb_stages;
        int ret;

        const struct rte_event_dev_config config = {
                        .nb_event_queues = perf_queue_nb_event_queues(opt),
                        .nb_event_ports = perf_nb_event_ports(opt),
                        .nb_events_limit = 4096,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth = 128,
                        .nb_event_port_enqueue_depth = 128,
        };

        ret = rte_event_dev_configure(opt->dev_id, &config);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
        }

        struct rte_event_queue_conf q_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        /* queue configurations */
        for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
                q_conf.schedule_type =
                        (opt->sched_type_list[queue % nb_stages]);

                if (opt->q_priority) {
                        uint8_t stage_pos = queue % nb_stages;
                        /* Configure event queues (stage 0 to stage n) with
                         * RTE_EVENT_DEV_PRIORITY_LOWEST to
                         * RTE_EVENT_DEV_PRIORITY_HIGHEST.
                         */
                        uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
                                        (nb_stages - 1);
                        /* Higher prio for the queues closer to last stage */
                        q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
                                        (step * stage_pos);
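                        /* E.g. nb_stages = 3: step = 255 / 2 = 127, so the
                         * three stages get priorities 255, 128 and 1 (lower
                         * value = higher priority). Note this assumes
                         * nb_stages > 1 whenever --q_priority is set.
                         */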
                }
                ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
                if (ret) {
                        evt_err("failed to setup queue=%d", queue);
                        return ret;
                }
        }

        ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
                                        perf_queue_nb_event_queues(opt));
        if (ret)
                return ret;

        ret = evt_service_setup(opt->dev_id);
        if (ret) {
                evt_err("No service lcore found to run event dev.");
                return ret;
        }

        ret = rte_event_dev_start(opt->dev_id);
        if (ret) {
                evt_err("failed to start eventdev %d", opt->dev_id);
                return ret;
        }

        return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
        evt_dump_fwd_latency(opt);
        perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
        return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

static bool
perf_queue_capability_check(struct evt_options *opt)
{
        struct rte_event_dev_info dev_info;

        rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
                        dev_info.max_event_ports < perf_nb_event_ports(opt)) {
                evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
                        perf_queue_nb_event_queues(opt),
                        dev_info.max_event_queues,
                        perf_nb_event_ports(opt), dev_info.max_event_ports);
                return false;
        }

        return true;
}

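/* Hooks wired into the test framework; EVT_TEST_REGISTER() below publishes
 * this test under the name "perf_queue".
 */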
static const struct evt_test_ops perf_queue = {
        .cap_check          = perf_queue_capability_check,
        .opt_check          = perf_queue_opt_check,
        .opt_dump           = perf_queue_opt_dump,
        .test_setup         = perf_test_setup,
        .mempool_setup      = perf_mempool_setup,
        .eventdev_setup     = perf_queue_eventdev_setup,
        .launch_lcores      = perf_queue_launch_lcores,
        .eventdev_destroy   = perf_eventdev_destroy,
        .mempool_destroy    = perf_mempool_destroy,
        .test_result        = perf_test_result,
        .test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);