/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

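/*
 * Test topology: a producer enqueues events carrying per-flow sequence
 * numbers into q0 (RTE_SCHED_TYPE_ORDERED). Workers forward each event
 * from q0 to q1 (RTE_SCHED_TYPE_ATOMIC), where stage 1 checks that
 * events of the same flow are seen in their original order.
 *
 * Example invocation (from the guide linked above):
 *   sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_sw0 -- \
 *                --test=order_queue --plcores 1 --wlcores 2,3
 */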
static __rte_always_inline void
order_queue_process_stage_0(struct rte_event *const ev)
{
	ev->queue_id = 1; /* q1 atomic queue */
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

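/*
 * Non-burst worker, used when the device lacks
 * RTE_EVENT_DEV_CAP_BURST_MODE: dequeue a single event at a time,
 * forward q0 events to q1 and validate q1 events in stage 1.
 */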
static int
order_queue_worker(void *arg, const bool flow_id_cap)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (!flow_id_cap)
			order_flow_id_copy_from_mbuf(t, &ev);

		if (ev.queue_id == 0) { /* from ordered queue */
			order_queue_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.queue_id == 1) { /* from atomic queue */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}

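/*
 * Burst worker: dequeue up to BURST_SIZE events per call. Stage 1
 * marks completed events RTE_EVENT_OP_RELEASE, so the single enqueue
 * below both forwards the stage 0 events and releases the finished
 * ones.
 */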
static int
order_queue_worker_burst(void *arg, const bool flow_id_cap)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
				BURST_SIZE, 0);

		if (nb_rx == 0) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (!flow_id_cap)
				order_flow_id_copy_from_mbuf(t, &ev[i]);

			if (ev[i].queue_id == 0) { /* from ordered queue */
				order_queue_process_stage_0(&ev[i]);
			} else if (ev[i].queue_id == 1) { /* from atomic queue */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

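/*
 * Resolve the device capabilities once at launch and call the matching
 * specialization; passing compile-time constants lets the compiler
 * drop the dead flow_id_cap branches inside each worker.
 */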
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	const bool burst = evt_has_burst_mode(w->dev_id);
	const bool flow_id_cap = evt_has_flow_id(w->dev_id);

	if (burst) {
		if (flow_id_cap)
			return order_queue_worker_burst(arg, true);
		else
			return order_queue_worker_burst(arg, false);
	} else {
		if (flow_id_cap)
			return order_queue_worker(arg, true);
		else
			return order_queue_worker(arg, false);
	}
}

static int
order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return order_launch_lcores(test, opt, worker_wrapper);
}

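/*
 * Event device setup: q0 (ordered) and q1 (atomic), plus one port per
 * worker and one for the producer.
 */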
#define NB_QUEUES 2
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 (ordered queue) configuration */
	struct rte_event_queue_conf q0_ordered_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* q1 (atomic queue) configuration */
	struct rte_event_queue_conf q1_atomic_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
	if (ret) {
		evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

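	/*
	 * Devices without RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (e.g. the
	 * event_sw PMD) need a service core to run the scheduler.
	 */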
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;
		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
order_queue_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

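/* Ensure the device can supply the queues and ports this test needs. */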
static bool
order_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
			order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

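/*
 * Register the test with the harness; callbacks without an
 * order_queue_ prefix are shared with the other order_* tests via
 * test_order_common.h.
 */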
static const struct evt_test_ops order_queue = {
	.cap_check          = order_queue_capability_check,
	.opt_check          = order_opt_check,
	.opt_dump           = order_queue_opt_dump,
	.test_setup         = order_test_setup,
	.mempool_setup      = order_mempool_setup,
	.eventdev_setup     = order_queue_eventdev_setup,
	.launch_lcores      = order_queue_launch_lcores,
	.eventdev_destroy   = order_eventdev_destroy,
	.mempool_destroy    = order_mempool_destroy,
	.test_result        = order_test_result,
	.test_destroy       = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);