/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

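/*
 * perf_atq exercises the eventdev over "all types" (ATQ) queues: each queue
 * is configured with RTE_EVENT_QUEUE_CFG_ALL_TYPES, so a single queue can
 * carry atomic, ordered and parallel events, and the pipeline stage is
 * tracked in ev.sub_event_type instead of using one queue per stage. An
 * illustrative invocation, assuming the software eventdev vdev is available
 * (see the guide linked above for the authoritative option list):
 *
 *   dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \
 *           --test=perf_atq --plcores=1 --wlcores=2-3 --stlist=a
 */
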
static inline int
atq_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers */
	return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}

static __rte_always_inline void
atq_mark_fwd_latency(struct rte_event *const ev)
{
	/* only stage 0 owns the timestamp in the event payload */
	if (unlikely(ev->sub_event_type == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

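/*
 * Advance an event to the next pipeline stage on the same ATQ queue: bump
 * the stage counter kept in sub_event_type, select that stage's scheduling
 * type and resubmit the event with RTE_EVENT_OP_FORWARD.
 */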
static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->sub_event_type++;
	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

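/*
 * Single-event worker loop, used by worker_wrapper() when the device does
 * not report burst mode capability.
 */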
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		if (prod_crypto_type &&
		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			struct rte_crypto_op *op = ev.event_ptr;

			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
				/* forward the crypto output mbuf downstream */
				if (op->sym->m_dst == NULL)
					ev.event_ptr = op->sym->m_src;
				else
					ev.event_ptr = op->sym->m_dst;
				rte_crypto_op_free(op);
			} else {
				rte_crypto_op_free(op);
				continue;
			}
		}

		if (enable_fwd_latency && !prod_timer_type)
		/* first stage in pipeline, mark ts to compute fwd latency */
			atq_mark_fwd_latency(&ev);

		/* last stage in pipeline */
		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, &ev, w,
					bufs, sz, cnt);
		} else {
			atq_fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

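/*
 * Burst variant of the worker above: dequeues up to BURST_SIZE events per
 * call and prefetches the next event's payload while the current one is
 * processed, amortizing per-call driver overhead.
 */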
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (prod_crypto_type &&
			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				struct rte_crypto_op *op = ev[i].event_ptr;

				if (op->status ==
				    RTE_CRYPTO_OP_STATUS_SUCCESS) {
					if (op->sym->m_dst == NULL)
						ev[i].event_ptr =
							op->sym->m_src;
					else
						ev[i].event_ptr =
							op->sym->m_dst;
					rte_crypto_op_free(op);
				} else {
					rte_crypto_op_free(op);
					continue;
				}
			}

			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				atq_mark_fwd_latency(&ev[i]);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].sub_event_type % nb_stages)
						== laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
						nb_stages);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

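/*
 * Select one of the four specialized loops. enable_fwd_latency is passed as
 * a literal constant at each call site so the compiler can drop the dead
 * branches from the per-event fast path.
 */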
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_atq_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_atq_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_atq_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_atq_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

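/*
 * Device setup: one ATQ queue per producer, ports for the workers (plus the
 * producers when they are lcore based), then start the device and whichever
 * producer-side adapter the command line selected.
 */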
static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	uint8_t queue;
	uint8_t nb_queues;
	uint8_t nb_ports;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = atq_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
			&p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
						cdev_id);
				return ret;
			}
		}
	}

	return 0;
}

static void
perf_atq_opt_dump(struct evt_options *opt)
{
	perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, atq_nb_event_queues(opt));
}

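/*
 * Beyond the usual queue/port count checks, the ATQ test requires the
 * device to advertise all-types queue support (probed through
 * evt_has_all_types_queue()).
 */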
static bool
perf_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			atq_nb_event_queues(opt), dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}
	if (!evt_has_all_types_queue(opt->dev_id))
		return false;

	return true;
}

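/*
 * Wire the ATQ-specific callbacks into the common perf test framework;
 * EVT_TEST_REGISTER() makes the test selectable with --test=perf_atq.
 */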
static const struct evt_test_ops perf_atq = {
	.cap_check = perf_atq_capability_check,
	.opt_check = perf_atq_opt_check,
	.opt_dump = perf_atq_opt_dump,
	.test_setup = perf_test_setup,
	.ethdev_setup = perf_ethdev_setup,
	.cryptodev_setup = perf_cryptodev_setup,
	.mempool_setup = perf_mempool_setup,
	.eventdev_setup = perf_atq_eventdev_setup,
	.launch_lcores = perf_atq_launch_lcores,
	.eventdev_destroy = perf_eventdev_destroy,
	.mempool_destroy = perf_mempool_destroy,
	.ethdev_destroy = perf_ethdev_destroy,
	.cryptodev_destroy = perf_cryptodev_destroy,
	.test_result = perf_test_result,
	.test_destroy = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);