/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_PERF_COMMON_
#define _TEST_PERF_COMMON_

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_event_timer_adapter.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

#define TEST_PERF_CA_ID 0

struct test_perf;

struct worker_data {
	uint64_t processed_pkts;
	uint64_t latency;
	uint8_t dev_id;
	uint8_t port_id;
	struct test_perf *t;
} __rte_cache_aligned;

struct crypto_adptr_data {
	uint8_t cdev_id;
	uint16_t cdev_qp_id;
	void **crypto_sess;
};

struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct crypto_adptr_data ca;
	struct test_perf *t;
} __rte_cache_aligned;

struct test_perf {
	/* Don't change the offset of "done". The signal handler uses this
	 * memory to terminate work on all lcores.
	 */
	int done;
	uint64_t outstand_pkts;
	uint8_t nb_workers;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	struct prod_data prod[EVT_MAX_PORTS];
	struct worker_data worker[EVT_MAX_PORTS];
	struct evt_options *opt;
	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
	struct rte_event_timer_adapter *timer_adptr[
			RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
	struct rte_mempool *ca_op_pool;
	struct rte_mempool *ca_sess_pool;
	struct rte_mempool *ca_sess_priv_pool;
	struct rte_mempool *ca_asym_sess_pool;
} __rte_cache_aligned;

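/*
 * The "done" flag above is what the interrupt path flips: keeping it at
 * offset zero lets code that only holds the opaque test pointer reach the
 * flag without knowing the rest of the layout. A minimal sketch of a stop
 * helper a SIGINT handler might call (evt_test_priv() is assumed to be the
 * private-data accessor from evt_test.h; the helper name is illustrative):
 *
 *	static void
 *	perf_stop(struct evt_test *test)
 *	{
 *		struct test_perf *t = evt_test_priv(test);
 *
 *		t->done = 1;
 *	}
 */
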
struct perf_elt {
	union {
		struct rte_event_timer tim;
		struct {
			char pad[offsetof(struct rte_event_timer, user_meta)];
			uint64_t timestamp;
		};
	};
} __rte_cache_aligned;

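/*
 * Layout note: "timestamp" overlays rte_event_timer's user_meta field (the
 * pad[] array is sized with offsetof() so the two union members line up),
 * which lets timer and non-timer producers share one element format. A
 * producer-side sketch under that assumption (ev, dev_id, port_id and t are
 * assumed to be in scope):
 *
 *	struct perf_elt *m;
 *
 *	if (rte_mempool_get(t->pool, (void **)&m) < 0)
 *		return;
 *	m->timestamp = rte_get_timer_cycles();
 *	ev.event_ptr = m;
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 */
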
#define MAX_PROD_ENQ_BURST_SIZE 128

#define PERF_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_perf *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint8_t prod_timer_type = \
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
	const uint8_t prod_crypto_type = \
		opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR;\
	uint8_t *const sched_type_list = &t->sched_type_list[0];\
	struct rte_mempool *const pool = t->pool;\
	const uint8_t nb_stages = t->opt->nb_stages;\
	const uint8_t laststage = nb_stages - 1;\
	void *bufs[16] __rte_cache_aligned;\
	int const sz = RTE_DIM(bufs);\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
				rte_lcore_id(), dev, port)

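/*
 * PERF_WORKER_INIT is pasted at the top of each worker body; the variables
 * it declares (w, t, dev, port, pool, bufs, sz, ...) are then used directly.
 * A trimmed sketch of the usual single-event loop (the real workers in the
 * queue/atq tests add producer-type and crypto handling on top of this):
 *
 *	static int
 *	perf_worker(void *arg)
 *	{
 *		PERF_WORKER_INIT;
 *		uint8_t count = 0;
 *		struct rte_event ev;
 *
 *		while (t->done == 0) {
 *			if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0))
 *				continue;
 *			if (ev.sub_event_type == laststage)
 *				count = perf_process_last_stage(pool, &ev,
 *						w, bufs, sz, count);
 *			else {
 *				ev.sub_event_type++;
 *				ev.sched_type =
 *					sched_type_list[ev.sub_event_type];
 *				ev.op = RTE_EVENT_OP_FORWARD;
 *				rte_event_enqueue_burst(dev, port, &ev, 1);
 *			}
 *		}
 *		return 0;
 *	}
 */
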
static __rte_always_inline int
perf_process_last_stage(struct rte_mempool *const pool,
		struct rte_event *const ev, struct worker_data *const w,
		void *bufs[], int const buf_sz, uint8_t count)
{
	/* Release fence here ensures event_ptr is
	 * stored before updating the number of
	 * processed packets for worker lcores.
	 */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	w->processed_pkts++;

	/* Asymmetric crypto ops carry their own result buffer; free them
	 * directly instead of returning them to the event mempool.
	 */
	if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&
			((struct rte_crypto_op *)ev->event_ptr)->type ==
				RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_crypto_op *op = ev->event_ptr;

		rte_free(op->asym->modex.result.data);
		rte_crypto_op_free(op);
	} else {
		bufs[count++] = ev->event_ptr;
		if (unlikely(count == buf_sz)) {
			count = 0;
			rte_mempool_put_bulk(pool, bufs, buf_sz);
		}
	}

	return count;
}

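/*
 * bufs[] carries finished elements across calls, so a worker that exits its
 * dequeue loop with count != 0 still owes the pool a final flush; a sketch
 * of that epilogue:
 *
 *	if (count)
 *		rte_mempool_put_bulk(pool, bufs, count);
 */
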
static __rte_always_inline uint8_t
perf_process_last_stage_latency(struct rte_mempool *const pool,
		struct rte_event *const ev, struct worker_data *const w,
		void *bufs[], int const buf_sz, uint8_t count)
{
	uint64_t latency;
	struct perf_elt *const m = ev->event_ptr;

	/* Release fence here ensures event_ptr is
	 * stored before updating the number of
	 * processed packets for worker lcores.
	 */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	w->processed_pkts++;

	if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&
			((struct rte_crypto_op *)m)->type ==
				RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		rte_free(((struct rte_crypto_op *)m)->asym->modex.result.data);
		rte_crypto_op_free((struct rte_crypto_op *)m);
		return count;
	}

	bufs[count++] = ev->event_ptr;
	if (unlikely(count == buf_sz)) {
		count = 0;
		/* Read the timestamp before the element goes back to the
		 * pool.
		 */
		latency = rte_get_timer_cycles() - m->timestamp;
		rte_mempool_put_bulk(pool, bufs, buf_sz);
	} else {
		latency = rte_get_timer_cycles() - m->timestamp;
	}

	w->latency += latency;
	return count;
}

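/*
 * w->latency accumulates raw timer cycles. A result printer can turn it
 * into an average per-event latency in seconds like this (sketch; assumes
 * processed_pkts is non-zero):
 *
 *	double avg = (double)w->latency /
 *		(double)w->processed_pkts / (double)rte_get_timer_hz();
 */
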
static inline int
perf_nb_event_ports(struct evt_options *opt)
{
	/* One event port per worker lcore plus one per producer lcore. */
	return evt_nr_active_lcores(opt->wlcores) +
			evt_nr_active_lcores(opt->plcores);
}

int perf_test_result(struct evt_test *test, struct evt_options *opt);
int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
int perf_test_setup(struct evt_test *test, struct evt_options *opt);
int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt);
int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf);
int perf_event_dev_service_setup(uint8_t dev_id);
int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
			 uint8_t port_id, struct rte_event events[],
			 uint16_t nb_enq, uint16_t nb_deq);

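/*
 * perf_worker_cleanup() is the shared worker exit path: it is expected to
 * return events that were dequeued but never re-enqueued to the mempool and
 * to quiesce the port. A hypothetical call site at the end of a worker loop
 * (ev[], nb_enq and nb_deq assumed from the surrounding code):
 *
 *	nb_deq = rte_event_dequeue_burst(dev, port, ev, RTE_DIM(ev), 0);
 *	...
 *	perf_worker_cleanup(pool, dev, port, ev, nb_enq, nb_deq);
 */
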
#endif /* _TEST_PERF_COMMON_ */