/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
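
/*
 * Test topology: the producer enqueues events to q0 (ordered queue).
 * Workers dequeue from q0 and forward every event to q1 (atomic queue);
 * stage 1 then validates the per-flow sequence (expected_flow_seq) to
 * detect any reordering.
 */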

static __rte_always_inline void
order_queue_process_stage_0(struct rte_event *const ev)
{
        ev->queue_id = 1; /* q1 atomic queue */
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
        ev->event_type = RTE_EVENT_TYPE_CPU;
}
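
/*
 * Single-event worker, used when the event device does not report burst
 * mode (see worker_wrapper below). Dequeues one event at a time and
 * retries the forwarding enqueue until it is accepted.
 */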

static int
order_queue_worker(void *arg)
{
        ORDER_WORKER_INIT;
        struct rte_event ev;

        while (t->err == false) {
                uint16_t event = rte_event_dequeue_burst(dev_id, port,
                                        &ev, 1, 0);
                if (!event) {
                        if (rte_atomic64_read(outstand_pkts) <= 0)
                                break;
                        rte_pause();
                        continue;
                }

                if (ev.queue_id == 0) { /* from ordered queue */
                        order_queue_process_stage_0(&ev);
                        while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
                                        != 1)
                                rte_pause();
                } else if (ev.queue_id == 1) { /* from atomic queue */
                        order_process_stage_1(t, &ev, nb_flows,
                                        expected_flow_seq, outstand_pkts);
                } else {
                        order_process_stage_invalid(t, &ev);
                }
        }
        return 0;
}
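
/*
 * Burst-mode worker: dequeues up to BURST_SIZE events per call. Events
 * that completed stage 1 are marked RTE_EVENT_OP_RELEASE so the enqueue
 * loop below releases their atomic context rather than forwarding them
 * again; partial enqueues are retried until the whole burst is accepted.
 */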

static int
order_queue_worker_burst(void *arg)
{
        ORDER_WORKER_INIT;
        struct rte_event ev[BURST_SIZE];
        uint16_t i;

        while (t->err == false) {
                uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
                                BURST_SIZE, 0);

                if (nb_rx == 0) {
                        if (rte_atomic64_read(outstand_pkts) <= 0)
                                break;
                        rte_pause();
                        continue;
                }

                for (i = 0; i < nb_rx; i++) {
                        if (ev[i].queue_id == 0) { /* from ordered queue */
                                order_queue_process_stage_0(&ev[i]);
                        } else if (ev[i].queue_id == 1) { /* from atomic queue */
                                order_process_stage_1(t, &ev[i], nb_flows,
                                        expected_flow_seq, outstand_pkts);
                                ev[i].op = RTE_EVENT_OP_RELEASE;
                        } else {
                                order_process_stage_invalid(t, &ev[i]);
                        }
                }

                uint16_t enq;

                enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
                while (enq < nb_rx) {
                        enq += rte_event_enqueue_burst(dev_id, port,
                                                        ev + enq, nb_rx - enq);
                }
        }
        return 0;
}
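
/*
 * Dispatch to the burst or single-event worker based on the burst
 * capability reported by the event device.
 */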

static int
worker_wrapper(void *arg)
{
        struct worker_data *w = arg;
        const bool burst = evt_has_burst_mode(w->dev_id);

        if (burst)
                return order_queue_worker_burst(arg);
        else
                return order_queue_worker(arg);
}

static int
order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
        return order_launch_lcores(test, opt, worker_wrapper);
}
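
/*
 * Eventdev setup: two queues (q0 ordered, q1 atomic) plus one port per
 * worker and one for the producer; every port is linked to both queues
 * by order_event_dev_port_setup().
 */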

#define NB_QUEUES 2
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
        int ret;

        const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
        /* number of active worker cores + 1 producer */
        const uint8_t nb_ports = nb_workers + 1;

        ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
        }

        /* q0 (ordered queue) configuration */
        struct rte_event_queue_conf q0_ordered_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .schedule_type = RTE_SCHED_TYPE_ORDERED,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
        if (ret) {
                evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
                return ret;
        }

        /* q1 (atomic queue) configuration */
        struct rte_event_queue_conf q1_atomic_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .schedule_type = RTE_SCHED_TYPE_ATOMIC,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
        if (ret) {
                evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
                return ret;
        }

        /* setup one port per worker, linking to all queues */
        ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
        if (ret)
                return ret;

        if (!evt_has_distributed_sched(opt->dev_id)) {
                uint32_t service_id;
                rte_event_dev_service_id_get(opt->dev_id, &service_id);
                ret = evt_service_setup(service_id);
                if (ret) {
                        evt_err("No service lcore found to run event dev.");
                        return ret;
                }
        }

        ret = rte_event_dev_start(opt->dev_id);
        if (ret) {
                evt_err("failed to start eventdev %d", opt->dev_id);
                return ret;
        }

        return 0;
}

static void
order_queue_opt_dump(struct evt_options *opt)
{
        order_opt_dump(opt);
        evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

static bool
order_queue_capability_check(struct evt_options *opt)
{
        struct rte_event_dev_info dev_info;

        rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
                        order_nb_event_ports(opt)) {
                evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
                        NB_QUEUES, dev_info.max_event_queues,
                        order_nb_event_ports(opt), dev_info.max_event_ports);
                return false;
        }

        return true;
}

static const struct evt_test_ops order_queue = {
        .cap_check          = order_queue_capability_check,
        .opt_check          = order_opt_check,
        .opt_dump           = order_queue_opt_dump,
        .test_setup         = order_test_setup,
        .mempool_setup      = order_mempool_setup,
        .eventdev_setup     = order_queue_eventdev_setup,
        .launch_lcores      = order_queue_launch_lcores,
        .eventdev_destroy   = order_eventdev_destroy,
        .mempool_destroy    = order_mempool_destroy,
        .test_result        = order_test_result,
        .test_destroy       = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);
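
/*
 * One possible invocation, following the testeventdev guide linked above
 * (the lcore numbers and the event_sw0 vdev are illustrative, not
 * required by this test):
 *
 *   sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_sw0 -- \
 *                --test=order_queue --plcores=1 --wlcores=2,3 \
 *                --nb_flows=64 --nb_pkts=0
 */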