app/eventdev: add pipeline queue test
app/test-eventdev/test_pipeline_queue.c
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

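/*
 * Each ethdev gets nb_stages worker queues plus one Tx queue, i.e.
 * eth_count * (nb_stages + 1) event queues in total.
 */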
static __rte_always_inline int
pipeline_queue_nb_event_queues(struct evt_options *opt)
{
        uint16_t eth_count = rte_eth_dev_count();

        return (eth_count * opt->nb_stages) + eth_count;
}

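/*
 * No worker functions are plugged in yet; launching a worker lcore
 * lands in the panic below.
 */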
static int
worker_wrapper(void *arg)
{
        RTE_SET_USED(arg);
        rte_panic("invalid worker\n");
}

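/*
 * When the ethdev Tx path is not multi-thread safe, packets are sent
 * through a single-threaded Tx service; enable it before launching the
 * worker lcores.
 */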
static int
pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
        struct test_pipeline *t = evt_test_priv(test);

        if (t->mt_unsafe)
                rte_service_component_runstate_set(t->tx_service.service_id, 1);
        return pipeline_launch_lcores(test, opt, worker_wrapper);
}

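/*
 * Create the event queues and ports, attach the Rx adapter (and the Tx
 * service when the Tx path is not multi-thread safe), then start the
 * event device.
 */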
static int
pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
        int ret;
        int nb_ports;
        int nb_queues;
        int nb_stages = opt->nb_stages;
        uint8_t queue;
        struct rte_event_dev_info info;
        struct test_pipeline *t = evt_test_priv(test);
        uint8_t tx_evqueue_id = 0;
        uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint8_t nb_worker_queues = 0;

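        /* One event port per worker lcore; nb_stages queues per ethdev. */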
        nb_ports = evt_nr_active_lcores(opt->wlcores);
        nb_queues = rte_eth_dev_count() * nb_stages;

        /* Extra queue and port for the Tx service. */
        if (t->mt_unsafe) {
                tx_evqueue_id = nb_queues;
                nb_ports++;
                nb_queues++;
        } else
                nb_queues += rte_eth_dev_count();

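        /* Query the event device limits and size the configuration from them. */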
        rte_event_dev_info_get(opt->dev_id, &info);

        const struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
                        .nb_events_limit = info.max_num_events,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth =
                                info.max_event_port_dequeue_depth,
                        .nb_event_port_enqueue_depth =
                                info.max_event_port_enqueue_depth,
        };
        ret = rte_event_dev_configure(opt->dev_id, &config);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
        }

        struct rte_event_queue_conf q_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        /* queue configurations */
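        /*
         * In the multi-thread safe case the last queue of each pipeline
         * (the Tx queue) is forced to ATOMIC; in the unsafe case a single
         * SINGLE_LINK|ATOMIC queue feeds the Tx service instead.
         */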
        for (queue = 0; queue < nb_queues; queue++) {
                uint8_t slot;

                if (!t->mt_unsafe) {
                        slot = queue % (nb_stages + 1);
                        q_conf.schedule_type = slot == nb_stages ?
                                RTE_SCHED_TYPE_ATOMIC :
                                opt->sched_type_list[slot];
                } else {
                        slot = queue % nb_stages;

                        if (queue == tx_evqueue_id) {
                                q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
                                q_conf.event_queue_cfg =
                                        RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
                        } else {
                                q_conf.schedule_type =
                                        opt->sched_type_list[slot];
                                queue_arr[nb_worker_queues] = queue;
                                nb_worker_queues++;
                        }
                }

                ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
                if (ret) {
                        evt_err("failed to setup queue=%d", queue);
                        return ret;
                }
        }

        /* port configuration */
        const struct rte_event_port_conf p_conf = {
                        .dequeue_depth = opt->wkr_deq_dep,
                        .enqueue_depth = info.max_event_port_dequeue_depth,
                        .new_event_threshold = info.max_num_events,
        };

        /*
         * If the Tx path is multi-thread safe, let the workers transmit
         * packets directly; otherwise use a dedicated Tx service.
         */
        if (t->mt_unsafe) {
                ret = pipeline_event_port_setup(test, opt, queue_arr,
                                nb_worker_queues, p_conf);
                if (ret)
                        return ret;

                ret = pipeline_event_tx_service_setup(test, opt, tx_evqueue_id,
                                nb_ports - 1, p_conf);
        } else
                ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
                                p_conf);

        if (ret)
                return ret;

        /*
         * The pipelines are set up in the following manner:
         *
         * eth_dev_count = 2, nb_stages = 2.
         *
         * Multi thread safe:
         *      queues = 6
         *      stride = 3
         *
         *      event queue pipelines:
         *      eth0 -> q0 -> q1 -> (q2->tx)
         *      eth1 -> q3 -> q4 -> (q5->tx)
         *
         *      q2, q5 configured as ATOMIC
         *
         * Multi thread unsafe:
         *      queues = 5
         *      stride = 2
         *
         *      event queue pipelines:
         *      eth0 -> q0 -> q1
         *                      } (q4->tx) Tx service
         *      eth1 -> q2 -> q3
         *
         *      q4 configured as SINGLE_LINK|ATOMIC
         */
        ret = pipeline_event_rx_adapter_setup(opt,
                        t->mt_unsafe ? nb_stages : nb_stages + 1, p_conf);
        if (ret)
                return ret;

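        /*
         * Event devices without a distributed scheduler (e.g. the SW PMD)
         * need a service core to run the scheduler.
         */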
        if (!evt_has_distributed_sched(opt->dev_id)) {
                uint32_t service_id;
                rte_event_dev_service_id_get(opt->dev_id, &service_id);
                ret = evt_service_setup(service_id);
                if (ret) {
                        evt_err("No service lcore found to run event dev.");
                        return ret;
                }
        }

        ret = rte_event_dev_start(opt->dev_id);
        if (ret) {
                evt_err("failed to start eventdev %d", opt->dev_id);
                return ret;
        }

        return 0;
}

static void
pipeline_queue_opt_dump(struct evt_options *opt)
{
        pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt));
}

static int
pipeline_queue_opt_check(struct evt_options *opt)
{
        return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt));
}

static bool
pipeline_queue_capability_check(struct evt_options *opt)
{
        struct rte_event_dev_info dev_info;

        rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) ||
                        dev_info.max_event_ports <
                        evt_nr_active_lcores(opt->wlcores)) {
                evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
                        pipeline_queue_nb_event_queues(opt),
                        dev_info.max_event_queues,
                        evt_nr_active_lcores(opt->wlcores),
                        dev_info.max_event_ports);
                return false;
        }

        return true;
}

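/*
 * Bind the generic pipeline helpers with the queue-specific setup and
 * launch hooks defined above.
 */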
static const struct evt_test_ops pipeline_queue = {
        .cap_check          = pipeline_queue_capability_check,
        .opt_check          = pipeline_queue_opt_check,
        .opt_dump           = pipeline_queue_opt_dump,
        .test_setup         = pipeline_test_setup,
        .mempool_setup      = pipeline_mempool_setup,
        .ethdev_setup       = pipeline_ethdev_setup,
        .eventdev_setup     = pipeline_queue_eventdev_setup,
        .launch_lcores      = pipeline_queue_launch_lcores,
        .eventdev_destroy   = pipeline_eventdev_destroy,
        .mempool_destroy    = pipeline_mempool_destroy,
        .ethdev_destroy     = pipeline_ethdev_destroy,
        .test_result        = pipeline_test_result,
        .test_destroy       = pipeline_test_destroy,
};

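/* Register the "pipeline_queue" test with the eventdev test framework. */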
EVT_TEST_REGISTER(pipeline_queue);