app/eventdev: add ethernet device setup helpers
[dpdk.git] app/test-eventdev/test_perf_atq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

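/*
 * With all-types (ATQ) queues, every pipeline stage is scheduled through the
 * same queue, so the test needs only one event queue per producer instead of
 * one queue per stage.
 */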
static inline int
atq_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers */
	return evt_nr_active_lcores(opt->plcores);
}

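/*
 * Timestamp the perf_elt when an event enters the first pipeline stage
 * (sub_event_type == 0); the last stage reads it back to compute the
 * forward latency through the pipeline.
 */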
static __rte_always_inline void
atq_mark_fwd_latency(struct rte_event *const ev)
{
	if (unlikely(ev->sub_event_type == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

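/*
 * Advance the event to the next stage: sub_event_type tracks the stage
 * index, the per-stage scheduling type is looked up from sched_type_list,
 * and the event is re-submitted with RTE_EVENT_OP_FORWARD. Only these
 * fields change between stages since all stages share one ATQ queue.
 */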
static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->sub_event_type++;
	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		/* Prefetch is issued before the empty-dequeue check: it is
		 * only an address hint, so a stale pointer from an empty
		 * dequeue is never dereferenced.
		 */
		if (enable_fwd_latency)
			rte_prefetch0(ev.event_ptr);

		if (!event) {
			rte_pause();
			continue;
		}

		/* first stage in pipeline, mark ts to compute fwd latency */
		if (enable_fwd_latency)
			atq_mark_fwd_latency(&ev);

		/* last stage in pipeline */
		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, &ev, w,
					bufs, sz, cnt);
		} else {
			atq_fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

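/*
 * Burst variant: dequeue up to BURST_SIZE events at once, process each in
 * place, then enqueue the whole batch back, retrying until the device has
 * accepted every event.
 */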
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				atq_mark_fwd_latency(&ev[i]);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].sub_event_type % nb_stages)
						== laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
						nb_stages);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

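/*
 * Dispatch to one of four specialized loops: enable_fwd_latency is passed
 * as a compile-time constant to the static workers above, so the compiler
 * can drop the untaken branches from each fast path.
 */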
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_atq_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_atq_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_atq_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_atq_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

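/*
 * Eventdev bring-up: one port per worker lcore (plus one per producer lcore
 * unless an ethdev Rx adapter is the producer), one all-types queue per
 * producer, followed by queue/port setup, service-core binding and device
 * start.
 */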
static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	uint8_t queue;
	uint8_t nb_queues;
	uint8_t nb_ports;

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count() : atq_nb_event_queues(opt);

	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
					nb_queues);
	if (ret)
		return ret;

	ret = evt_service_setup(opt->dev_id);
	if (ret) {
		evt_err("No service lcore found to run event dev.");
		return ret;
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_atq_opt_dump(struct evt_options *opt)
{
	perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, atq_nb_event_queues(opt));
}

static bool
perf_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			atq_nb_event_queues(opt), dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}
	if (!evt_has_all_types_queue(opt->dev_id))
		return false;

	return true;
}

static const struct evt_test_ops perf_atq = {
	.cap_check          = perf_atq_capability_check,
	.opt_check          = perf_atq_opt_check,
	.opt_dump           = perf_atq_opt_dump,
	.test_setup         = perf_test_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.mempool_setup      = perf_mempool_setup,
	.eventdev_setup     = perf_atq_eventdev_setup,
	.launch_lcores      = perf_atq_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);
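
/*
 * Example invocation (a sketch; the software eventdev vdev name and the
 * core ids are assumptions that depend on the platform):
 *
 *   dpdk-test-eventdev --vdev=event_sw0 -- --test=perf_atq \
 *           --plcores=1 --wlcores=2,3 --stlist=a,o --nb_flows=256 \
 *           --fwd_latency
 *
 * This runs a two-stage atomic/ordered pipeline with one producer core,
 * two worker cores and forward-latency measurement enabled.
 */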