2 * SPDX-License-Identifier: BSD-3-Clause
3 * Copyright 2017 Cavium, Inc.
6 #include "test_pipeline_common.h"
8 /* See https://doc.dpdk.org/guides/tools/testeventdev.html for test details */
/*
 * Number of event queues this test needs: one queue per pipeline stage per
 * ethernet device, plus one extra (Tx) queue per ethernet device.
 * NOTE(review): only part of this function is visible in this chunk (the
 * enclosing braces are outside the view).
 */
10 static __rte_always_inline int
11 pipeline_queue_nb_event_queues(struct evt_options *opt)
13 	uint16_t eth_count = rte_eth_dev_count();
	/* nb_stages worker queues per port + one Tx queue per port. */
15 	return (eth_count * opt->nb_stages) + eth_count;
/*
 * Per-lcore worker entry point. The dispatch to the concrete stage worker
 * is not visible in this chunk; the only visible path is the fallback that
 * aborts on an unsupported worker configuration.
 */
19 worker_wrapper(void *arg)
	/* No matching worker variant: unrecoverable config error, abort. */
22 	rte_panic("invalid worker\n");
/*
 * Start the test: enable the Tx service component (presumably only when the
 * ethdev Tx path is not multi-thread safe — the guarding condition is not
 * visible in this chunk; verify against the full source) and then launch the
 * worker lcores via the common pipeline launcher.
 */
26 pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
28 	struct test_pipeline *t = evt_test_priv(test);
	/* Mark the Tx service runnable before workers start enqueuing to it. */
31 	rte_service_component_runstate_set(t->tx_service.service_id, 1);
32 	return pipeline_launch_lcores(test, opt, worker_wrapper);
/*
 * Configure the event device for the "queue" pipeline test:
 *   - size the device (queues = eth_ports * stages, plus per-port Tx queues
 *     when a Tx service is used; ports = active worker lcores, plus one for
 *     the Tx service),
 *   - set up every event queue's schedule type,
 *   - set up worker ports (and the Tx service port when Tx is not MT-safe),
 *   - hook up the Rx adapter and start the device.
 * NOTE(review): several lines (error branches, else arms, returns, closing
 * braces) are missing from this chunk; comments below describe only what is
 * visible and hedge the rest.
 */
36 pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
41 	int nb_stages = opt->nb_stages;
43 	struct rte_event_dev_info info;
44 	struct test_pipeline *t = evt_test_priv(test);
45 	uint8_t tx_evqueue_id = 0;
46 	uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
47 	uint8_t nb_worker_queues = 0;
	/* One event port per active worker lcore. */
49 	nb_ports = evt_nr_active_lcores(opt->wlcores);
	/* One event queue per stage per ethernet device. */
50 	nb_queues = rte_eth_dev_count() * (nb_stages);
52 	/* Extra port for Tx service. */
	/* Tx-service case: first Tx queue id follows the worker queues.
	 * NOTE(review): the condition selecting this branch (presumably
	 * t->mt_unsafe) is not visible here.
	 */
54 	tx_evqueue_id = nb_queues;
	/* One additional single-link Tx queue per ethernet device. */
58 	nb_queues += rte_eth_dev_count();
60 	rte_event_dev_info_get(opt->dev_id, &info);
	/* Size the device from the test options, capped by device limits. */
62 	const struct rte_event_dev_config config = {
63 		.nb_event_queues = nb_queues,
64 		.nb_event_ports = nb_ports,
65 		.nb_events_limit = info.max_num_events,
66 		.nb_event_queue_flows = opt->nb_flows,
67 		.nb_event_port_dequeue_depth =
68 			info.max_event_port_dequeue_depth,
69 		.nb_event_port_enqueue_depth =
70 			info.max_event_port_enqueue_depth,
72 	ret = rte_event_dev_configure(opt->dev_id, &config);
74 		evt_err("failed to configure eventdev %d", opt->dev_id);
78 	struct rte_event_queue_conf q_conf = {
79 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
80 		.nb_atomic_flows = opt->nb_flows,
81 		.nb_atomic_order_sequences = opt->nb_flows,
83 	/* queue configurations */
84 	for (queue = 0; queue < nb_queues; queue++) {
		/* MT-safe layout: stages + 1 queues per pipeline; the last
		 * slot is the Tx queue and is forced ATOMIC, the rest take
		 * the user-requested schedule types.
		 */
88 		slot = queue % (nb_stages + 1);
89 		q_conf.schedule_type = slot == nb_stages ?
90 			RTE_SCHED_TYPE_ATOMIC :
91 			opt->sched_type_list[slot];
		/* Tx-service layout: nb_stages worker queues per pipeline. */
93 		slot = queue % nb_stages;
		/* Dedicated Tx queue: single-link, ATOMIC, owned by the Tx
		 * service port.
		 */
95 		if (queue == tx_evqueue_id) {
96 			q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
97 			q_conf.event_queue_cfg =
98 				RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
			/* Regular worker queue: record it so worker ports can
			 * be linked to exactly these queues later.
			 */
100 			q_conf.schedule_type =
101 				opt->sched_type_list[slot];
102 			queue_arr[nb_worker_queues] = queue;
107 		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
109 			evt_err("failed to setup queue=%d", queue);
114 	/* port configuration */
115 	const struct rte_event_port_conf p_conf = {
116 		.dequeue_depth = opt->wkr_deq_dep,
		/* NOTE(review): enqueue depth is taken from the device's max
		 * *dequeue* depth — looks like a copy/paste slip; confirm
		 * against upstream before changing.
		 */
117 		.enqueue_depth = info.max_event_port_dequeue_depth,
118 		.new_event_threshold = info.max_num_events,
122 	 * If tx is multi thread safe then allow workers to do Tx else use Tx
123 	 * service to Tx packets.
	/* Tx-service case: workers link only to the worker queues... */
126 	ret = pipeline_event_port_setup(test, opt, queue_arr,
127 			nb_worker_queues, p_conf);
	/* ...and the last port is reserved for the Tx service, linked to
	 * the single-link Tx queue.
	 */
131 	ret = pipeline_event_tx_service_setup(test, opt, tx_evqueue_id,
132 			nb_ports - 1, p_conf);
	/* MT-safe case: every worker port links to all queues. */
135 	ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
141 	 * The pipelines are setup in the following manner:
143 	 *	eth_dev_count = 2, nb_stages = 2.
145 	 *	Multi thread safe :
149 	 *	event queue pipelines:
150 	 *	eth0 -> q0 -> q1 -> (q2->tx)
151 	 *	eth1 -> q3 -> q4 -> (q5->tx)
153 	 *	q2, q5 configured as ATOMIC
155 	 *	Multi thread unsafe :
159 	 *	event queue pipelines:
161 	 *			   } (q4->tx) Tx service
164 	 *	q4 configured as SINGLE_LINK|ATOMIC
	/* Rx adapter injects packets into queue 0 of each pipeline; the
	 * stride differs because the Tx-service layout has one fewer queue
	 * per pipeline.
	 */
166 	ret = pipeline_event_rx_adapter_setup(opt,
167 			t->mt_unsafe ? nb_stages : nb_stages + 1, p_conf);
	/* Devices without distributed scheduling need a service core to run
	 * the scheduler.
	 */
171 	if (!evt_has_distributed_sched(opt->dev_id)) {
173 		rte_event_dev_service_id_get(opt->dev_id, &service_id);
174 		ret = evt_service_setup(service_id);
176 			evt_err("No service lcore found to run event dev.");
181 	ret = rte_event_dev_start(opt->dev_id);
183 		evt_err("failed to start eventdev %d", opt->dev_id);
/* Dump the test options along with the computed event-queue count. */
191 pipeline_queue_opt_dump(struct evt_options *opt)
193 	pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt));
/* Validate the test options against the computed event-queue count. */
197 pipeline_queue_opt_check(struct evt_options *opt)
199 	return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt));
/*
 * Verify the event device exposes enough queues for the pipeline layout and
 * enough ports for the active worker lcores. The return statements are not
 * visible in this chunk (presumably false/true or -1/0 around the error
 * branch — confirm against the full source).
 */
203 pipeline_queue_capability_check(struct evt_options *opt)
205 	struct rte_event_dev_info dev_info;
207 	rte_event_dev_info_get(opt->dev_id, &dev_info);
208 	if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) ||
209 			dev_info.max_event_ports <
210 			evt_nr_active_lcores(opt->wlcores)) {
211 		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
212 			pipeline_queue_nb_event_queues(opt),
213 			dev_info.max_event_queues,
214 			evt_nr_active_lcores(opt->wlcores),
215 			dev_info.max_event_ports);
/*
 * Test-ops vtable for the "pipeline_queue" test case: setup/teardown hooks
 * shared with the common pipeline code, plus the queue-specific checks and
 * eventdev setup defined above. Registered with the testeventdev framework
 * below. (The table's closing brace is outside this chunk.)
 */
221 static const struct evt_test_ops pipeline_queue = {
222 	.cap_check = pipeline_queue_capability_check,
223 	.opt_check = pipeline_queue_opt_check,
224 	.opt_dump = pipeline_queue_opt_dump,
225 	.test_setup = pipeline_test_setup,
226 	.mempool_setup = pipeline_mempool_setup,
227 	.ethdev_setup = pipeline_ethdev_setup,
228 	.eventdev_setup = pipeline_queue_eventdev_setup,
229 	.launch_lcores = pipeline_queue_launch_lcores,
230 	.eventdev_destroy = pipeline_eventdev_destroy,
231 	.mempool_destroy = pipeline_mempool_destroy,
232 	.ethdev_destroy = pipeline_ethdev_destroy,
233 	.test_result = pipeline_test_result,
234 	.test_destroy = pipeline_test_destroy,
/* Make the test selectable as "pipeline_queue" on the command line. */
237 EVT_TEST_REGISTER(pipeline_queue);