/* dpdk: app/test-eventdev/test_order_common.c */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include "test_order_common.h"
6
7 int
8 order_test_result(struct evt_test *test, struct evt_options *opt)
9 {
10         RTE_SET_USED(opt);
11         struct test_order *t = evt_test_priv(test);
12
13         return t->result;
14 }
15
16 static inline int
17 order_producer(void *arg)
18 {
19         struct prod_data *p  = arg;
20         struct test_order *t = p->t;
21         struct evt_options *opt = t->opt;
22         const uint8_t dev_id = p->dev_id;
23         const uint8_t port = p->port_id;
24         struct rte_mempool *pool = t->pool;
25         const uint64_t nb_pkts = t->nb_pkts;
26         uint32_t *producer_flow_seq = t->producer_flow_seq;
27         const uint32_t nb_flows = t->nb_flows;
28         uint64_t count = 0;
29         struct rte_mbuf *m;
30         struct rte_event ev;
31
32         if (opt->verbose_level > 1)
33                 printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
34                          __func__, rte_lcore_id(), dev_id, port, p->queue_id);
35
36         ev.event = 0;
37         ev.op = RTE_EVENT_OP_NEW;
38         ev.queue_id = p->queue_id;
39         ev.sched_type = RTE_SCHED_TYPE_ORDERED;
40         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
41         ev.event_type =  RTE_EVENT_TYPE_CPU;
42         ev.sub_event_type = 0; /* stage 0 */
43
44         while (count < nb_pkts && t->err == false) {
45                 m = rte_pktmbuf_alloc(pool);
46                 if (m == NULL)
47                         continue;
48
49                 const uint32_t flow = (uintptr_t)m % nb_flows;
50                 /* Maintain seq number per flow */
51                 m->seqn = producer_flow_seq[flow]++;
52
53                 ev.flow_id = flow;
54                 ev.mbuf = m;
55
56                 while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
57                         if (t->err)
58                                 break;
59                         rte_pause();
60                 }
61
62                 count++;
63         }
64         return 0;
65 }
66
67 int
68 order_opt_check(struct evt_options *opt)
69 {
70         if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
71                 evt_err("Invalid producer type '%s' valid producer '%s'",
72                         evt_prod_id_to_name(opt->prod_type),
73                         evt_prod_id_to_name(EVT_PROD_TYPE_SYNT));
74                 return -1;
75         }
76
77         /* 1 producer + N workers + 1 master */
78         if (rte_lcore_count() < 3) {
79                 evt_err("test need minimum 3 lcores");
80                 return -1;
81         }
82
83         /* Validate worker lcores */
84         if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
85                 evt_err("worker lcores overlaps with master lcore");
86                 return -1;
87         }
88
89         if (evt_nr_active_lcores(opt->plcores) == 0) {
90                 evt_err("missing the producer lcore");
91                 return -1;
92         }
93
94         if (evt_nr_active_lcores(opt->plcores) != 1) {
95                 evt_err("only one producer lcore must be selected");
96                 return -1;
97         }
98
99         int plcore = evt_get_first_active_lcore(opt->plcores);
100
101         if (plcore < 0) {
102                 evt_err("failed to find active producer");
103                 return plcore;
104         }
105
106         if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
107                 evt_err("worker lcores overlaps producer lcore");
108                 return -1;
109         }
110         if (evt_has_disabled_lcore(opt->wlcores)) {
111                 evt_err("one or more workers lcores are not enabled");
112                 return -1;
113         }
114         if (!evt_has_active_lcore(opt->wlcores)) {
115                 evt_err("minimum one worker is required");
116                 return -1;
117         }
118
119         /* Validate producer lcore */
120         if (plcore == (int)rte_get_master_lcore()) {
121                 evt_err("producer lcore and master lcore should be different");
122                 return -1;
123         }
124         if (!rte_lcore_is_enabled(plcore)) {
125                 evt_err("producer lcore is not enabled");
126                 return -1;
127         }
128
129         /* Fixups */
130         if (opt->nb_pkts == 0)
131                 opt->nb_pkts = INT64_MAX;
132
133         return 0;
134 }
135
136 int
137 order_test_setup(struct evt_test *test, struct evt_options *opt)
138 {
139         void *test_order;
140
141         test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
142                                 RTE_CACHE_LINE_SIZE, opt->socket_id);
143         if (test_order  == NULL) {
144                 evt_err("failed to allocate test_order memory");
145                 goto nomem;
146         }
147         test->test_priv = test_order;
148
149         struct test_order *t = evt_test_priv(test);
150
151         t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
152                                  sizeof(*t->producer_flow_seq) * opt->nb_flows,
153                                 RTE_CACHE_LINE_SIZE, opt->socket_id);
154
155         if (t->producer_flow_seq  == NULL) {
156                 evt_err("failed to allocate t->producer_flow_seq memory");
157                 goto prod_nomem;
158         }
159
160         t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
161                                  sizeof(*t->expected_flow_seq) * opt->nb_flows,
162                                 RTE_CACHE_LINE_SIZE, opt->socket_id);
163
164         if (t->expected_flow_seq  == NULL) {
165                 evt_err("failed to allocate t->expected_flow_seq memory");
166                 goto exp_nomem;
167         }
168         rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
169         t->err = false;
170         t->nb_pkts = opt->nb_pkts;
171         t->nb_flows = opt->nb_flows;
172         t->result = EVT_TEST_FAILED;
173         t->opt = opt;
174         return 0;
175
176 exp_nomem:
177         rte_free(t->producer_flow_seq);
178 prod_nomem:
179         rte_free(test->test_priv);
180 nomem:
181         return -ENOMEM;
182 }
183
184 void
185 order_test_destroy(struct evt_test *test, struct evt_options *opt)
186 {
187         RTE_SET_USED(opt);
188         struct test_order *t = evt_test_priv(test);
189
190         rte_free(t->expected_flow_seq);
191         rte_free(t->producer_flow_seq);
192         rte_free(test->test_priv);
193 }
194
195 int
196 order_mempool_setup(struct evt_test *test, struct evt_options *opt)
197 {
198         struct test_order *t = evt_test_priv(test);
199
200         t->pool  = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
201                                         256 /* Cache */, 0,
202                                         512, /* Use very small mbufs */
203                                         opt->socket_id);
204         if (t->pool == NULL) {
205                 evt_err("failed to create mempool");
206                 return -ENOMEM;
207         }
208
209         return 0;
210 }
211
212 void
213 order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
214 {
215         RTE_SET_USED(opt);
216         struct test_order *t = evt_test_priv(test);
217
218         rte_mempool_free(t->pool);
219 }
220
221 void
222 order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
223 {
224         RTE_SET_USED(test);
225
226         rte_event_dev_stop(opt->dev_id);
227         rte_event_dev_close(opt->dev_id);
228 }
229
230 void
231 order_opt_dump(struct evt_options *opt)
232 {
233         evt_dump_producer_lcores(opt);
234         evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
235         evt_dump_worker_lcores(opt);
236         evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
237 }
238
/*
 * Launch the worker threads and the producer, then monitor progress
 * from the master lcore until all packets drain or a deadlock/error
 * is detected.
 *
 * Returns 0 on completion of the monitor loop (test verdict is stored
 * in t->result), or the rte_eal_remote_launch() error code if a lcore
 * failed to launch.
 */
int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers: one per enabled worker lcore, each bound to
	 * its own worker_data slot (and hence its own event port).
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					 lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer on the single lcore validated by order_opt_check() */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining  = -1;

	/* Monitor loop: poll the outstanding-packet counter; success once
	 * it drains to zero, failure if a worker flagged t->err.
	 */
	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		/* Roughly once per second, report progress; if the count
		 * has not moved since the last check, assume the scheduler
		 * is deadlocked, dump the device state and abort the test.
		 */
		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->err = true;
				/* publish t->err to workers before exiting */
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}
300
301 int
302 order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
303                                 uint8_t nb_workers, uint8_t nb_queues)
304 {
305         int ret;
306         uint8_t port;
307         struct test_order *t = evt_test_priv(test);
308         struct rte_event_dev_info dev_info;
309
310         memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
311         ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
312         if (ret) {
313                 evt_err("failed to get eventdev info %d", opt->dev_id);
314                 return ret;
315         }
316
317         if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
318                 opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
319
320         /* port configuration */
321         const struct rte_event_port_conf p_conf = {
322                         .dequeue_depth = opt->wkr_deq_dep,
323                         .enqueue_depth = dev_info.max_event_port_dequeue_depth,
324                         .new_event_threshold = dev_info.max_num_events,
325         };
326
327         /* setup one port per worker, linking to all queues */
328         for (port = 0; port < nb_workers; port++) {
329                 struct worker_data *w = &t->worker[port];
330
331                 w->dev_id = opt->dev_id;
332                 w->port_id = port;
333                 w->t = t;
334
335                 ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
336                 if (ret) {
337                         evt_err("failed to setup port %d", port);
338                         return ret;
339                 }
340
341                 ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
342                 if (ret != nb_queues) {
343                         evt_err("failed to link all queues to port %d", port);
344                         return -EINVAL;
345                 }
346         }
347         struct prod_data *p = &t->prod;
348
349         p->dev_id = opt->dev_id;
350         p->port_id = port; /* last port */
351         p->queue_id = 0;
352         p->t = t;
353
354         ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
355         if (ret) {
356                 evt_err("failed to setup producer port %d", port);
357                 return ret;
358         }
359
360         return ret;
361 }