/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdbool.h>

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "cperf_ops.h"
#include "cperf_test_pmd_cyclecount.h"
#include "cperf_test_common.h"
#define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n"
#define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n"
#define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n"
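
/*
 * Per-queue-pair test context: device/queue ids, mempools, mbuf and op
 * arrays, and the symmetric crypto session under test.
 */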
struct cperf_pmd_cyclecount_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint16_t lcore_id;

	struct rte_mempool *pkt_mbuf_pool_in;
	struct rte_mempool *pkt_mbuf_pool_out;
	struct rte_mbuf **mbufs_in;
	struct rte_mbuf **mbufs_out;

	struct rte_mempool *crypto_op_pool;
	struct rte_crypto_op **ops;
	struct rte_crypto_op **ops_processed;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};
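
/*
 * Per-run benchmark state: enqueue/dequeue counters plus the resulting
 * cycles-per-operation averages.
 */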
struct pmd_cyclecount_state {
	struct cperf_pmd_cyclecount_ctx *ctx;
	const struct cperf_options *opts;
	uint32_t lcore;
	uint64_t delay;
	int linearize;
	uint32_t ops_enqd;
	uint32_t ops_deqd;
	uint32_t ops_enq_retries;
	uint32_t ops_deq_retries;
	double cycles_per_build;
	double cycles_per_enq;
	double cycles_per_deq;
};
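
/* The IV is stored right after the symmetric op within each crypto op. */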
static const uint16_t iv_offset =
		sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op);
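
/* Release everything the context owns: session, common memory, op arrays. */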
static void
cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
{
	if (ctx) {
		if (ctx->sess) {
			rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
			rte_cryptodev_sym_session_free(ctx->sess);
		}

		cperf_free_common_memory(ctx->options,
				ctx->pkt_mbuf_pool_in,
				ctx->pkt_mbuf_pool_out,
				ctx->mbufs_in, ctx->mbufs_out,
				ctx->crypto_op_pool);

		if (ctx->ops)
			rte_free(ctx->ops);

		if (ctx->ops_processed)
			rte_free(ctx->ops_processed);

		rte_free(ctx);
	}
}
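
/* Build the test context for one device/queue pair. Returns NULL on error. */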
void *
cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_pmd_cyclecount_ctx *ctx = NULL;

	/* preallocate buffers for crypto ops as they can get quite big */
	size_t alloc_sz = sizeof(struct rte_crypto_op *) *
			options->nb_descriptors;

	ctx = rte_malloc(NULL, sizeof(struct cperf_pmd_cyclecount_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(
			sess_mp, dev_id, options, test_vector, iv_offset);
	if (ctx->sess == NULL)
		goto err;

	if (cperf_alloc_common_memory(options, test_vector, dev_id, 0,
			&ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
			&ctx->mbufs_in, &ctx->mbufs_out,
			&ctx->crypto_op_pool) < 0)
		goto err;

	ctx->ops = rte_malloc("ops", alloc_sz, 0);
	if (!ctx->ops)
		goto err;

	ctx->ops_processed = rte_malloc("ops_processed", alloc_sz, 0);
	if (!ctx->ops_processed)
		goto err;

	return ctx;

err:
	cperf_pmd_cyclecount_test_free(ctx);
	return NULL;
}
/* benchmark alloc-build-free of ops */
static int
pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
		uint16_t test_burst_size)
{
	uint32_t iter_ops_left = state->opts->total_ops - cur_op;
	uint32_t iter_ops_needed =
			RTE_MIN(state->opts->nb_descriptors, iter_ops_left);
	uint32_t cur_iter_op;

	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
			cur_iter_op += test_burst_size) {
		uint32_t burst_size = RTE_MIN(state->opts->total_ops - cur_op,
				test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		if (burst_size != rte_crypto_op_bulk_alloc(
				state->ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, burst_size))
			return -1;

		/* Setup crypto op, attach mbuf etc */
		(state->ctx->populate_ops)(ops,
				&state->ctx->mbufs_in[cur_iter_op],
				&state->ctx->mbufs_out[cur_iter_op], burst_size,
				state->ctx->sess, state->opts,
				state->ctx->test_vector, iv_offset);

#ifdef CPERF_LINEARIZATION_ENABLE
		/* Check if source mbufs require coalescing */
		if (state->linearize) {
			uint8_t i;
			for (i = 0; i < burst_size; i++) {
				struct rte_mbuf *src = ops[i]->sym->m_src;
				rte_pktmbuf_linearize(src);
			}
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		rte_mempool_put_bulk(state->ctx->crypto_op_pool, (void **)ops,
				burst_size);
	}

	return 0;
}
/* allocate and build ops (no free) */
static int
pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
{
	uint32_t cur_iter_op;

	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
			cur_iter_op += test_burst_size) {
		uint32_t burst_size = RTE_MIN(
				iter_ops_needed - cur_iter_op, test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		if (burst_size != rte_crypto_op_bulk_alloc(
				state->ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, burst_size))
			return -1;

		/* Setup crypto op, attach mbuf etc */
		(state->ctx->populate_ops)(ops,
				&state->ctx->mbufs_in[cur_iter_op],
				&state->ctx->mbufs_out[cur_iter_op], burst_size,
				state->ctx->sess, state->opts,
				state->ctx->test_vector, iv_offset);
	}

	return 0;
}
/* benchmark enqueue, returns number of ops enqueued */
static uint32_t
pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
{
	/* Enqueue full descriptor ring of ops on crypto device */
	uint32_t cur_iter_op = 0;
	while (cur_iter_op < iter_ops_needed) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
				test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
		uint32_t burst_enqd;

		burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id,
				state->ctx->qp_id, ops, burst_size);

		/* if we couldn't enqueue anything, the queue is full */
		if (!burst_enqd) {
			/* don't try to dequeue anything we didn't enqueue */
			return cur_iter_op;
		}

		if (burst_enqd < burst_size)
			state->ops_enq_retries++;
		state->ops_enqd += burst_enqd;
		cur_iter_op += burst_enqd;
	}

	return iter_ops_needed;
}
/* benchmark dequeue */
static void
pmd_cyclecount_bench_deq(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
{
	/* Dequeue full descriptor ring of ops on crypto device */
	uint32_t cur_iter_op = 0;
	while (cur_iter_op < iter_ops_needed) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
				test_burst_size);
		struct rte_crypto_op **ops_processed =
				&state->ctx->ops[cur_iter_op];
		uint32_t burst_deqd;

		burst_deqd = rte_cryptodev_dequeue_burst(state->ctx->dev_id,
				state->ctx->qp_id, ops_processed, burst_size);

		if (burst_deqd < burst_size)
			state->ops_deq_retries++;
		state->ops_deqd += burst_deqd;
		cur_iter_op += burst_deqd;
	}
}
/* run benchmark per burst size */
static int
pmd_cyclecount_bench_burst_sz(
		struct pmd_cyclecount_state *state, uint16_t test_burst_size)
{
	uint64_t tsc_start;
	uint64_t tsc_end;
	uint64_t tsc_op;
	uint64_t tsc_enq;
	uint64_t tsc_deq;
	uint32_t cur_op;

	/* reset all counters */
	tsc_enq = 0;
	tsc_deq = 0;
	state->ops_enqd = 0;
	state->ops_enq_retries = 0;
	state->ops_deqd = 0;
	state->ops_deq_retries = 0;

	/*
	 * Benchmark crypto op alloc-build-free separately.
	 */
	tsc_start = rte_rdtsc_precise();

	for (cur_op = 0; cur_op < state->opts->total_ops;
			cur_op += state->opts->nb_descriptors) {
		if (unlikely(pmd_cyclecount_bench_ops(
				state, cur_op, test_burst_size)))
			return -1;
	}

	tsc_end = rte_rdtsc_precise();
	tsc_op = tsc_end - tsc_start;

	/*
	 * Hardware acceleration cyclecount benchmarking loop.
	 *
	 * We're benchmarking raw enq/deq performance by filling up the device
	 * queue, so we never get any failed enqs unless the driver won't accept
	 * the exact number of descriptors we requested, or the driver won't
	 * wrap around the end of the TX ring. However, since we're only
	 * dequeueing once we've filled up the queue, we have to benchmark it
	 * piecemeal and then average out the results.
	 */
	cur_op = 0;
	while (cur_op < state->opts->total_ops) {
		uint32_t iter_ops_left = state->opts->total_ops - cur_op;
		uint32_t iter_ops_needed = RTE_MIN(
				state->opts->nb_descriptors, iter_ops_left);
		uint32_t iter_ops_allocd = iter_ops_needed;

		/* allocate and build ops */
		if (unlikely(pmd_cyclecount_build_ops(state, iter_ops_needed,
				test_burst_size)))
			return -1;

		tsc_start = rte_rdtsc_precise();

		/* fill up TX ring */
		iter_ops_needed = pmd_cyclecount_bench_enq(state,
				iter_ops_needed, test_burst_size);

		tsc_end = rte_rdtsc_precise();
		tsc_enq += tsc_end - tsc_start;

		/* allow for HW to catch up */
		if (state->delay)
			rte_delay_us_block(state->delay);

		tsc_start = rte_rdtsc_precise();

		/* drain RX ring */
		pmd_cyclecount_bench_deq(state, iter_ops_needed,
				test_burst_size);

		tsc_end = rte_rdtsc_precise();
		tsc_deq += tsc_end - tsc_start;

		cur_op += iter_ops_needed;

		/*
		 * we may not have processed all ops that we allocated, so
		 * free everything we've allocated.
		 */
		rte_mempool_put_bulk(state->ctx->crypto_op_pool,
				(void **)state->ctx->ops, iter_ops_allocd);
	}

	state->cycles_per_build = (double)tsc_op / state->opts->total_ops;
	state->cycles_per_enq = (double)tsc_enq / state->ops_enqd;
	state->cycles_per_deq = (double)tsc_deq / state->ops_deqd;

	return 0;
}
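
/*
 * Per-lcore runner: sweeps the configured burst size range (or list),
 * treating the first pass as a warm-up, and prints one pretty or CSV
 * line of results per burst size.
 */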
int
cperf_pmd_cyclecount_test_runner(void *test_ctx)
{
	struct pmd_cyclecount_state state = {0};
	const struct cperf_options *opts;
	uint16_t test_burst_size;
	uint8_t burst_size_idx = 0;

	state.ctx = test_ctx;
	opts = state.ctx->options;
	state.lcore = rte_lcore_id();
	state.linearize = 0;

	static int only_once;
	static bool warmup = true;

	/*
	 * We need a small delay to allow for hardware to process all the crypto
	 * operations. We can't automatically figure out what the delay should
	 * be, so we leave it up to the user (by default it's 0).
	 */
	state.delay = 1000 * opts->pmdcc_delay;

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;

	/* Check if source mbufs require coalescing */
	if (opts->segments_nb > 1) {
		rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0) {
			state.linearize = 1;
		}
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	state.ctx->lcore_id = state.lcore;

	/* Get first size from range or list */
	if (opts->inc_burst_size != 0)
		test_burst_size = opts->min_burst_size;
	else
		test_burst_size = opts->burst_size_list[0];

	while (test_burst_size <= opts->max_burst_size) {
		/* do a benchmark run */
		if (pmd_cyclecount_bench_burst_sz(&state, test_burst_size))
			return -1;

		/*
		 * First run is always a warm up run.
		 */
		if (warmup) {
			warmup = false;
			continue;
		}

		if (!opts->csv) {
			if (!only_once)
				printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
						"Burst Size", "Enqueued",
						"Dequeued", "Enq Retries",
						"Deq Retries", "Cycles/Op",
						"Cycles/Enq", "Cycles/Deq");
			only_once = 1;

			printf(PRETTY_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);
		} else {
			if (!only_once)
				printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
						"Burst Size", "Enqueued",
						"Dequeued", "Enq Retries",
						"Deq Retries", "Cycles/Op",
						"Cycles/Enq", "Cycles/Deq");
			only_once = 1;

			printf(CSV_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);
		}

		/* Get next size from range or list */
		if (opts->inc_burst_size != 0)
			test_burst_size += opts->inc_burst_size;
		else {
			if (++burst_size_idx == opts->burst_size_count)
				break;
			test_burst_size = opts->burst_size_list[burst_size_idx];
		}
	}

	return 0;
}
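
/* Destructor: releases the context allocated by the constructor. */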
void
cperf_pmd_cyclecount_test_destructor(void *arg)
{
	struct cperf_pmd_cyclecount_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	cperf_pmd_cyclecount_test_free(ctx);
}