/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "cperf_ops.h"
#include "cperf_test_pmd_cyclecount.h"
#include "cperf_test_common.h"

#define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n"
#define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n"
#define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n"
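
/* Per-queue-pair context shared by the constructor, runner and destructor */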
struct cperf_pmd_cyclecount_ctx {
	struct rte_mempool *pool;
	struct rte_crypto_op **ops;
	struct rte_crypto_op **ops_processed;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;

	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
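
/* Per-lcore benchmark state: retry counters and measured cycle costs */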
struct pmd_cyclecount_state {
	struct cperf_pmd_cyclecount_ctx *ctx;
	const struct cperf_options *opts;

	uint32_t ops_enq_retries;
	uint32_t ops_deq_retries;
	double cycles_per_build;
	double cycles_per_enq;
	double cycles_per_deq;
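
/* IV is stored at the end of the crypto operation, right after the sym op */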
static const uint16_t iv_offset =
		sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op);
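
/* Release the session, mempool and crypto op arrays owned by the context */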
cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
	rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
	rte_cryptodev_sym_session_free(ctx->sess);

	rte_mempool_free(ctx->pool);

	if (ctx->ops_processed)
		rte_free(ctx->ops_processed);
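
/* Constructor: create the crypto session and allocate the common memory and
 * op arrays for one device queue pair
 */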
cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
	struct cperf_pmd_cyclecount_ctx *ctx = NULL;

	/* preallocate buffers for crypto ops as they can get quite big */
	size_t alloc_sz = sizeof(struct rte_crypto_op *) *
			options->nb_descriptors;

	ctx = rte_malloc(NULL, sizeof(struct cperf_pmd_cyclecount_ctx), 0);

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options,
			test_vector, iv_offset);
	if (ctx->sess == NULL)

	if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
			&ctx->src_buf_offset, &ctx->dst_buf_offset,

	ctx->ops = rte_malloc("ops", alloc_sz, 0);

	ctx->ops_processed = rte_malloc("ops_processed", alloc_sz, 0);
	if (!ctx->ops_processed)

	cperf_pmd_cyclecount_test_free(ctx);
/* benchmark alloc-build-free of ops */
pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
		uint16_t test_burst_size)
	uint32_t iter_ops_left = state->opts->total_ops - cur_op;
	uint32_t iter_ops_needed =
			RTE_MIN(state->opts->nb_descriptors, iter_ops_left);
	uint32_t cur_iter_op;
	uint32_t imix_idx = 0;

	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
			cur_iter_op += test_burst_size) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "

		/* Setup crypto op, attach mbuf etc */
		(state->ctx->populate_ops)(ops,
				state->ctx->src_buf_offset,
				state->ctx->dst_buf_offset,
				state->ctx->sess, state->opts,
				state->ctx->test_vector, iv_offset,

#ifdef CPERF_LINEARIZATION_ENABLE
		/* Check if source mbufs require coalescing */
		if (state->linearize) {
			for (i = 0; i < burst_size; i++) {
				struct rte_mbuf *src = ops[i]->sym->m_src;
				rte_pktmbuf_linearize(src);
#endif /* CPERF_LINEARIZATION_ENABLE */
		rte_mempool_put_bulk(state->ctx->pool, (void **)ops,
/* allocate and build ops (no free) */
pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
	uint32_t cur_iter_op;
	uint32_t imix_idx = 0;

	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
			cur_iter_op += test_burst_size) {
		uint32_t burst_size = RTE_MIN(
				iter_ops_needed - cur_iter_op, test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "

		/* Setup crypto op, attach mbuf etc */
		(state->ctx->populate_ops)(ops,
				state->ctx->src_buf_offset,
				state->ctx->dst_buf_offset,
				state->ctx->sess, state->opts,
				state->ctx->test_vector, iv_offset,
/* benchmark enqueue, returns number of ops enqueued */
pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
	/* Enqueue full descriptor ring of ops on crypto device */
	uint32_t cur_iter_op = 0;
	while (cur_iter_op < iter_ops_needed) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id,
				state->ctx->qp_id, ops, burst_size);

		/* if we couldn't enqueue anything, the queue is full */
			/* don't try to dequeue anything we didn't enqueue */

		if (burst_enqd < burst_size)
			state->ops_enq_retries++;
		state->ops_enqd += burst_enqd;
		cur_iter_op += burst_enqd;

	return iter_ops_needed;
/* benchmark dequeue */
pmd_cyclecount_bench_deq(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
	/* Dequeue full descriptor ring of ops on crypto device */
	uint32_t cur_iter_op = 0;
	while (cur_iter_op < iter_ops_needed) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
		struct rte_crypto_op **ops_processed =
				&state->ctx->ops[cur_iter_op];

		burst_deqd = rte_cryptodev_dequeue_burst(state->ctx->dev_id,
				state->ctx->qp_id, ops_processed, burst_size);

		if (burst_deqd < burst_size)
			state->ops_deq_retries++;
		state->ops_deqd += burst_deqd;
		cur_iter_op += burst_deqd;
/* run benchmark per burst size */
pmd_cyclecount_bench_burst_sz(
		struct pmd_cyclecount_state *state, uint16_t test_burst_size)

	/* reset all counters */
	state->ops_enq_retries = 0;
	state->ops_deq_retries = 0;

	 * Benchmark crypto op alloc-build-free separately.
	tsc_start = rte_rdtsc_precise();

	for (cur_op = 0; cur_op < state->opts->total_ops;
			cur_op += state->opts->nb_descriptors) {
		if (unlikely(pmd_cyclecount_bench_ops(
				state, cur_op, test_burst_size)))

	tsc_end = rte_rdtsc_precise();
	tsc_op = tsc_end - tsc_start;

	 * Hardware acceleration cyclecount benchmarking loop.
	 * We're benchmarking raw enq/deq performance by filling up the device
	 * queue, so we never get any failed enqs unless the driver won't accept
	 * the exact number of descriptors we requested, or the driver won't
	 * wrap around the end of the TX ring. However, since we're only
	 * dequeueing once we've filled up the queue, we have to benchmark it
	 * piecemeal and then average out the results.

	while (cur_op < state->opts->total_ops) {
		uint32_t iter_ops_left = state->opts->total_ops - cur_op;
		uint32_t iter_ops_needed = RTE_MIN(
				state->opts->nb_descriptors, iter_ops_left);
		uint32_t iter_ops_allocd = iter_ops_needed;

		/* allocate and build ops */
		if (unlikely(pmd_cyclecount_build_ops(state, iter_ops_needed,

		tsc_start = rte_rdtsc_precise();

		/* fill up TX ring */
		iter_ops_needed = pmd_cyclecount_bench_enq(state,
				iter_ops_needed, test_burst_size);

		tsc_end = rte_rdtsc_precise();
		tsc_enq += tsc_end - tsc_start;

		/* allow for HW to catch up */
		rte_delay_us_block(state->delay);

		tsc_start = rte_rdtsc_precise();

		pmd_cyclecount_bench_deq(state, iter_ops_needed,

		tsc_end = rte_rdtsc_precise();
		tsc_deq += tsc_end - tsc_start;

		cur_op += iter_ops_needed;

		 * we may not have processed all ops that we allocated, so
		 * free everything we've allocated.
		rte_mempool_put_bulk(state->ctx->pool,
				(void **)state->ctx->ops, iter_ops_allocd);

	state->cycles_per_build = (double)tsc_op / state->opts->total_ops;
	state->cycles_per_enq = (double)tsc_enq / state->ops_enqd;
	state->cycles_per_deq = (double)tsc_deq / state->ops_deqd;
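
/* Per-lcore runner: benchmark each configured burst size and print results */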
cperf_pmd_cyclecount_test_runner(void *test_ctx)
	struct pmd_cyclecount_state state = {0};
	const struct cperf_options *opts;
	uint16_t test_burst_size;
	uint8_t burst_size_idx = 0;

	state.ctx = test_ctx;
	opts = state.ctx->options;

	state.lcore = rte_lcore_id();
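
	/* only_once guards one-time printing of the result header */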
	static int only_once;
	static bool warmup = true;

	 * We need a small delay to allow the hardware to process all the crypto
	 * operations. We can't automatically figure out what the delay should
	 * be, so we leave it up to the user (by default it's 0).
	state.delay = 1000 * opts->pmdcc_delay;

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;

	/* Check if source mbufs require coalescing */
	if (opts->segments_sz < opts->max_buffer_size) {
		rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
#endif /* CPERF_LINEARIZATION_ENABLE */

	state.ctx->lcore_id = state.lcore;

	/* Get first size from range or list */
	if (opts->inc_burst_size != 0)
		test_burst_size = opts->min_burst_size;
		test_burst_size = opts->burst_size_list[0];

	while (test_burst_size <= opts->max_burst_size) {
		/* do a benchmark run */
		if (pmd_cyclecount_bench_burst_sz(&state, test_burst_size))

		 * First run is always a warm up run.

			printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
					"Burst Size", "Enqueued",
					"Dequeued", "Enq Retries",
					"Deq Retries", "Cycles/Op",
					"Cycles/Enq", "Cycles/Deq");

			printf(PRETTY_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);

			printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
					"Burst Size", "Enqueued",
					"Dequeued", "Enq Retries",
					"Deq Retries", "Cycles/Op",
					"Cycles/Enq", "Cycles/Deq");

			printf(CSV_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);

		/* Get next size from range or list */
		if (opts->inc_burst_size != 0)
			test_burst_size += opts->inc_burst_size;
			if (++burst_size_idx == opts->burst_size_count)
			test_burst_size = opts->burst_size_list[burst_size_idx];
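
/* Framework destructor callback: free the per-queue-pair test context */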
cperf_pmd_cyclecount_test_destructor(void *arg)
	struct cperf_pmd_cyclecount_ctx *ctx = arg;

	cperf_pmd_cyclecount_test_free(ctx);