app/eventdev: clean up worker state before exit
app/test-eventdev/test_perf_atq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline int
atq_nb_event_queues(struct evt_options *opt)
{
        /* nb_queues = number of producers */
        return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
                rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}

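/* stamp the event on the first pipeline stage only (sub_event_type == 0),
 * so the last stage can compute the end-to-end forward latency from it
 */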
static __rte_always_inline void
atq_mark_fwd_latency(struct rte_event *const ev)
{
        if (unlikely(ev->sub_event_type == 0)) {
                struct perf_elt *const m = ev->event_ptr;

                m->timestamp = rte_get_timer_cycles();
        }
}

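/* advance the event to the next pipeline stage: bump the stage counter kept
 * in sub_event_type, pick that stage's schedule type and forward the event
 * on the same all-types queue
 */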
static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
                const uint8_t nb_stages)
{
        ev->sub_event_type++;
        ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->event_type = RTE_EVENT_TYPE_CPU;
}

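/* non-burst worker: dequeue one event at a time and either forward it to the
 * next stage or, at the last stage, account it towards the measurement
 */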
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
        uint16_t enq = 0, deq = 0;
        struct rte_event ev;
        PERF_WORKER_INIT;

        while (t->done == false) {
                deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

                if (!deq) {
                        rte_pause();
                        continue;
                }

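                /* unwrap completed crypto ops: on success carry the mbuf
                 * forward as the event payload, on failure drop the event
                 */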
                if (prod_crypto_type &&
                    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
                        struct rte_crypto_op *op = ev.event_ptr;

                        if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
                                if (op->sym->m_dst == NULL)
                                        ev.event_ptr = op->sym->m_src;
                                else
                                        ev.event_ptr = op->sym->m_dst;
                                rte_crypto_op_free(op);
                        } else {
                                rte_crypto_op_free(op);
                                continue;
                        }
                }

                /* first stage in pipeline, mark timestamp to compute fwd latency */
                if (enable_fwd_latency && !prod_timer_type)
                        atq_mark_fwd_latency(&ev);

                /* last stage in pipeline */
                if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
                        if (enable_fwd_latency)
                                cnt = perf_process_last_stage_latency(pool,
                                        &ev, w, bufs, sz, cnt);
                        else
                                cnt = perf_process_last_stage(pool, &ev, w,
                                        bufs, sz, cnt);
                } else {
                        atq_fwd_event(&ev, sched_type_list, nb_stages);
                        do {
                                enq = rte_event_enqueue_burst(dev, port, &ev,
                                                              1);
                        } while (!enq && !t->done);
                }
        }

        perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

        return 0;
}

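/* burst worker: same stage machine as above, but dequeues and re-enqueues up
 * to BURST_SIZE events per iteration
 */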
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
        /* +1 so the prefetch of ev[i + 1] never reads past the array */
        struct rte_event ev[BURST_SIZE + 1];
        uint16_t enq = 0, nb_rx = 0;
        PERF_WORKER_INIT;
        uint16_t i;

        while (t->done == false) {
                nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

                if (!nb_rx) {
                        rte_pause();
                        continue;
                }

                for (i = 0; i < nb_rx; i++) {
                        if (prod_crypto_type &&
                            (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
                                struct rte_crypto_op *op = ev[i].event_ptr;

                                if (op->status ==
                                    RTE_CRYPTO_OP_STATUS_SUCCESS) {
                                        if (op->sym->m_dst == NULL)
                                                ev[i].event_ptr =
                                                        op->sym->m_src;
                                        else
                                                ev[i].event_ptr =
                                                        op->sym->m_dst;
                                        rte_crypto_op_free(op);
                                } else {
                                        rte_crypto_op_free(op);
                                        continue;
                                }
                        }

                        if (enable_fwd_latency && !prod_timer_type) {
                                rte_prefetch0(ev[i + 1].event_ptr);
                                /* first stage in pipeline.
                                 * mark time stamp to compute fwd latency
                                 */
                                atq_mark_fwd_latency(&ev[i]);
                        }
                        /* last stage in pipeline */
                        if (unlikely((ev[i].sub_event_type % nb_stages)
                                                == laststage)) {
                                if (enable_fwd_latency)
                                        cnt = perf_process_last_stage_latency(
                                                pool, &ev[i], w, bufs, sz, cnt);
                                else
                                        cnt = perf_process_last_stage(pool,
                                                &ev[i], w, bufs, sz, cnt);

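                                /* the event is consumed here; mark it
                                 * RELEASE so the burst enqueue below
                                 * returns the scheduling context
                                 */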
                                ev[i].op = RTE_EVENT_OP_RELEASE;
                        } else {
                                atq_fwd_event(&ev[i], sched_type_list,
                                                nb_stages);
                        }
                }

                enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
                while ((enq < nb_rx) && !t->done) {
                        enq += rte_event_enqueue_burst(dev, port,
                                                        ev + enq, nb_rx - enq);
                }
        }

        perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

        return 0;
}

static int
worker_wrapper(void *arg)
{
        struct worker_data *w = arg;
        struct evt_options *opt = w->t->opt;

        const bool burst = evt_has_burst_mode(w->dev_id);
        const int fwd_latency = opt->fwd_latency;

        /* allow compiler to optimize */
        if (!burst && !fwd_latency)
                return perf_atq_worker(arg, 0);
        else if (!burst && fwd_latency)
                return perf_atq_worker(arg, 1);
        else if (burst && !fwd_latency)
                return perf_atq_worker_burst(arg, 0);
        else if (burst && fwd_latency)
                return perf_atq_worker_burst(arg, 1);

        rte_panic("invalid worker\n");
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
        return perf_launch_lcores(test, opt, worker_wrapper);
}

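/* configure the all-types queues and worker ports, then start the event
 * device and whichever producer-side adapters the test uses
 */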
static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
        int ret;
        uint8_t queue;
        uint8_t nb_queues;
        uint8_t nb_ports;
        uint16_t prod;
        struct rte_event_dev_info dev_info;
        struct test_perf *t = evt_test_priv(test);

        nb_ports = evt_nr_active_lcores(opt->wlcores);
        nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
                        opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
                evt_nr_active_lcores(opt->plcores);

        nb_queues = atq_nb_event_queues(opt);

        memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
        ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (ret) {
                evt_err("failed to get eventdev info %d", opt->dev_id);
                return ret;
        }

        ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
        }

        struct rte_event_queue_conf q_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        /* queue configurations */
        for (queue = 0; queue < nb_queues; queue++) {
                ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
                if (ret) {
                        evt_err("failed to setup queue=%d", queue);
                        return ret;
                }
        }

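        /* clamp the worker dequeue depth to what the device supports */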
        if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
                opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

        /* port configuration */
        const struct rte_event_port_conf p_conf = {
                        .dequeue_depth = opt->wkr_deq_dep,
                        .enqueue_depth = dev_info.max_event_port_dequeue_depth,
                        .new_event_threshold = dev_info.max_num_events,
        };

        ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
                        &p_conf);
        if (ret)
                return ret;

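        /* event devices without distributed scheduling (e.g. the sw PMD)
         * need a service core to run the scheduling logic
         */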
        if (!evt_has_distributed_sched(opt->dev_id)) {
                uint32_t service_id;

                rte_event_dev_service_id_get(opt->dev_id, &service_id);
                ret = evt_service_setup(service_id);
                if (ret) {
                        evt_err("No service lcore found to run event dev.");
                        return ret;
                }
        }

        ret = rte_event_dev_start(opt->dev_id);
        if (ret) {
                evt_err("failed to start eventdev %d", opt->dev_id);
                return ret;
        }

        if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
                RTE_ETH_FOREACH_DEV(prod) {
                        ret = rte_eth_dev_start(prod);
                        if (ret) {
                                evt_err("Ethernet dev [%d] failed to start",
                                                prod);
                                return ret;
                        }

                        ret = rte_event_eth_rx_adapter_start(prod);
                        if (ret) {
                                evt_err("Rx adapter[%d] start failed", prod);
                                return ret;
                        }
                        printf("%s: Port[%d] using Rx adapter[%d] started\n",
                                        __func__, prod, prod);
                }
        } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
                for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
                        ret = rte_event_timer_adapter_start(
                                        t->timer_adptr[prod]);
                        if (ret) {
                                evt_err("failed to start event timer adapter %d",
                                                prod);
                                return ret;
                        }
                }
        } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
                uint8_t cdev_id, cdev_count;

                cdev_count = rte_cryptodev_count();
                for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
                        ret = rte_cryptodev_start(cdev_id);
                        if (ret) {
                                evt_err("Failed to start cryptodev %u",
                                        cdev_id);
                                return ret;
                        }
                }
        }

        return 0;
}

static void
perf_atq_opt_dump(struct evt_options *opt)
{
        perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
        return perf_opt_check(opt, atq_nb_event_queues(opt));
}

static bool
perf_atq_capability_check(struct evt_options *opt)
{
        struct rte_event_dev_info dev_info;

        rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
                        dev_info.max_event_ports < perf_nb_event_ports(opt)) {
                evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
                        atq_nb_event_queues(opt), dev_info.max_event_queues,
                        perf_nb_event_ports(opt), dev_info.max_event_ports);
                return false;
        }
        if (!evt_has_all_types_queue(opt->dev_id))
                return false;

        return true;
}

static const struct evt_test_ops perf_atq = {
        .cap_check          = perf_atq_capability_check,
        .opt_check          = perf_atq_opt_check,
        .opt_dump           = perf_atq_opt_dump,
        .test_setup         = perf_test_setup,
        .ethdev_setup       = perf_ethdev_setup,
        .cryptodev_setup    = perf_cryptodev_setup,
        .ethdev_rx_stop     = perf_ethdev_rx_stop,
        .mempool_setup      = perf_mempool_setup,
        .eventdev_setup     = perf_atq_eventdev_setup,
        .launch_lcores      = perf_atq_launch_lcores,
        .eventdev_destroy   = perf_eventdev_destroy,
        .mempool_destroy    = perf_mempool_destroy,
        .ethdev_destroy     = perf_ethdev_destroy,
        .cryptodev_destroy  = perf_cryptodev_destroy,
        .test_result        = perf_test_result,
        .test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);