/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_latency.h"
#include "cperf_ops.h"
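
/*
 * Latency test for the crypto performance application: each burst of
 * operations is timestamped around its enqueue and again on dequeue, so
 * that per-operation latency in TSC cycles can be reported.
 */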
struct cperf_op_result {
	uint64_t tsc_start;
	uint64_t tsc_end;
	enum rte_crypto_op_status status;
};
struct cperf_latency_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pkt_mbuf_pool_in;
	struct rte_mempool *pkt_mbuf_pool_out;
	struct rte_mbuf **mbufs_in;
	struct rte_mbuf **mbufs_out;

	struct rte_mempool *crypto_op_pool;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
	struct cperf_op_result *res;
};
struct priv_op_data {
	struct cperf_op_result *result;
};
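
/*
 * Per-operation private area layout assumed throughout this file (a
 * sketch; the offsets follow directly from the sizeof() sums used for
 * iv_offset below):
 *
 *   +----------------------------+ <- op
 *   | struct rte_crypto_op       |
 *   +----------------------------+ <- op->sym
 *   | struct rte_crypto_sym_op   |
 *   +----------------------------+ <- (struct priv_op_data *)(op->sym + 1)
 *   | struct cperf_op_result *   |
 *   +----------------------------+ <- iv_offset
 *   | cipher/auth/AEAD IV copies |
 *   +----------------------------+
 */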
/* 64-bit min/max helpers; arguments parenthesized so expression operands
 * are evaluated safely.
 */
#define max(a, b) ((a) > (b) ? (uint64_t)(a) : (uint64_t)(b))
#define min(a, b) ((a) < (b) ? (uint64_t)(a) : (uint64_t)(b))
static void
cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
{
	uint32_t i;

	if (ctx) {
		if (ctx->sess) {
			rte_cryptodev_sym_session_clear(ctx->dev_id,
					ctx->sess);
			rte_cryptodev_sym_session_free(ctx->sess);
		}

		if (ctx->mbufs_in) {
			for (i = 0; i < mbuf_nb; i++)
				rte_pktmbuf_free(ctx->mbufs_in[i]);
			rte_free(ctx->mbufs_in);
		}

		if (ctx->mbufs_out) {
			for (i = 0; i < mbuf_nb; i++) {
				if (ctx->mbufs_out[i] != NULL)
					rte_pktmbuf_free(ctx->mbufs_out[i]);
			}
			rte_free(ctx->mbufs_out);
		}

		if (ctx->pkt_mbuf_pool_in)
			rte_mempool_free(ctx->pkt_mbuf_pool_in);
		if (ctx->pkt_mbuf_pool_out)
			rte_mempool_free(ctx->pkt_mbuf_pool_out);
		if (ctx->crypto_op_pool)
			rte_mempool_free(ctx->crypto_op_pool);

		rte_free(ctx->res);
		rte_free(ctx);
	}
}
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
		uint32_t segments_nb,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	struct rte_mbuf *mbuf;
	uint32_t segment_sz = options->max_buffer_size / segments_nb;
	uint32_t last_sz = options->max_buffer_size % segments_nb;
	uint8_t *mbuf_data;
	uint8_t *test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					test_vector->plaintext.data :
					test_vector->ciphertext.data;

	mbuf = rte_pktmbuf_alloc(mempool);
	if (mbuf == NULL)
		goto error;
	mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
	if (mbuf_data == NULL)
		goto error;

	memcpy(mbuf_data, test_data, segment_sz);
	test_data += segment_sz;
	segments_nb--;

	while (segments_nb) {
		struct rte_mbuf *m;

		m = rte_pktmbuf_alloc(mempool);
		if (m == NULL)
			goto error;

		rte_pktmbuf_chain(mbuf, m);

		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
		if (mbuf_data == NULL)
			goto error;

		memcpy(mbuf_data, test_data, segment_sz);
		test_data += segment_sz;
		segments_nb--;
	}

	if (last_sz) {
		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
		if (mbuf_data == NULL)
			goto error;

		memcpy(mbuf_data, test_data, last_sz);
	}
	if (options->op_type != CPERF_CIPHER_ONLY) {
		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
				options->digest_sz);
		if (mbuf_data == NULL)
			goto error;
	}

	if (options->op_type == CPERF_AEAD) {
		uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
				RTE_ALIGN_CEIL(options->aead_aad_sz, 16));

		if (aead == NULL)
			goto error;

		memcpy(aead, test_vector->aad.data, test_vector->aad.length);
	}

	return mbuf;
error:
	rte_pktmbuf_free(mbuf);

	return NULL;
}
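
/*
 * Example of the segmentation math above (illustrative numbers only):
 * with max_buffer_size = 1024 and segments_nb = 3, each segment gets
 * 1024 / 3 = 341 bytes and the remaining 1024 % 3 = 1 byte is appended
 * to the tail segment, so the chain still totals max_buffer_size.
 */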
void *
cperf_latency_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_latency_ctx *ctx = NULL;
	unsigned int mbuf_idx = 0;
	char pool_name[32] = "";

	ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
	if (ctx == NULL)
		goto err;
	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;
	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op) +
		sizeof(struct cperf_op_result *);
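	/*
	 * The same three-term offset is recomputed in the runner and handed
	 * to populate_ops(), so session setup and op population agree on
	 * where the IV copies start inside each op's private area.
	 */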
	ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
			iv_offset);
	if (ctx->sess == NULL)
		goto err;

	snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
			dev_id);

	ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
			options->pool_sz * options->segments_nb, 0, 0,
			RTE_PKTMBUF_HEADROOM +
			RTE_CACHE_LINE_ROUNDUP(
				(options->max_buffer_size /
					options->segments_nb) +
				(options->max_buffer_size %
					options->segments_nb) +
				options->digest_sz),
			rte_socket_id());

	if (ctx->pkt_mbuf_pool_in == NULL)
		goto err;
	/* Generate mbufs_in with plaintext populated for test */
	ctx->mbufs_in = rte_malloc(NULL,
			(sizeof(struct rte_mbuf *) *
			ctx->options->pool_sz), 0);
	if (ctx->mbufs_in == NULL)
		goto err;

	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
		ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
				ctx->pkt_mbuf_pool_in, options->segments_nb,
				options, test_vector);
		if (ctx->mbufs_in[mbuf_idx] == NULL)
			goto err;
	}
	if (options->out_of_place == 1) {

		snprintf(pool_name, sizeof(pool_name),
				"cperf_pool_out_cdev_%d",
				dev_id);

		ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
				pool_name, options->pool_sz, 0, 0,
				RTE_PKTMBUF_HEADROOM +
				RTE_CACHE_LINE_ROUNDUP(
					options->max_buffer_size +
					options->digest_sz),
				rte_socket_id());

		if (ctx->pkt_mbuf_pool_out == NULL)
			goto err;
	}
	ctx->mbufs_out = rte_malloc(NULL,
			(sizeof(struct rte_mbuf *) *
			ctx->options->pool_sz), 0);
	if (ctx->mbufs_out == NULL)
		goto err;

	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
		if (options->out_of_place == 1) {
			ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
					ctx->pkt_mbuf_pool_out, 1,
					options, test_vector);
			if (ctx->mbufs_out[mbuf_idx] == NULL)
				goto err;
		} else {
			ctx->mbufs_out[mbuf_idx] = NULL;
		}
	}
	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
			dev_id);

	uint16_t priv_size = sizeof(struct priv_op_data) +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length;
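	/*
	 * The op mempool's private area carries the result pointer declared
	 * in struct priv_op_data plus room for the per-op IV copies; the
	 * 512 below is the mempool's per-lcore object cache size.
	 */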
	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
			512, priv_size, rte_socket_id());

	if (ctx->crypto_op_pool == NULL)
		goto err;
	ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
			ctx->options->total_ops, 0);

	if (ctx->res == NULL)
		goto err;

	return ctx;
err:
	/* mbuf_idx counts the mbufs created before the failure, so only
	 * valid entries are freed on this path.
	 */
	cperf_latency_test_free(ctx, mbuf_idx);

	return NULL;
}
static inline void
store_timestamp(struct rte_crypto_op *op, uint64_t timestamp)
{
	struct priv_op_data *priv_data;

	priv_data = (struct priv_op_data *) (op->sym + 1);
	priv_data->result->status = op->status;
	priv_data->result->tsc_end = timestamp;
}
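
/*
 * Operations may complete out of order and across different dequeue
 * bursts, so the completion time is written through the result pointer
 * stashed in each op's private area at enqueue time, not by array
 * position.
 */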
int
cperf_latency_test_runner(void *arg)
{
	struct cperf_latency_ctx *ctx = arg;
	uint16_t test_burst_size;
	uint8_t burst_size_idx = 0;

	static int only_once;

	if (ctx == NULL)
		return 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
	uint64_t i;
	struct priv_op_data *priv_data;

	uint32_t lcore = rte_lcore_id();
#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segments_nb > 1) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */
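
	/*
	 * Linearization is compile-time gated: when enabled and the PMD
	 * lacks scatter-gather support, segmented source mbufs are coalesced
	 * into one segment before enqueue. That copy happens inside the
	 * timed region below, so it shows up in the measured latency.
	 */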
	ctx->lcore_id = lcore;

	/* Warm up the host CPU before starting the test */
	for (i = 0; i < ctx->options->total_ops; i++)
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
	/* Get first size from range or list */
	if (ctx->options->inc_burst_size != 0)
		test_burst_size = ctx->options->min_burst_size;
	else
		test_burst_size = ctx->options->burst_size_list[0];
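	/*
	 * The burst size under test is swept either over an arithmetic
	 * range (min_burst_size stepped by inc_burst_size up to
	 * max_burst_size) or over the explicit burst_size_list, depending
	 * on how the application's burst-size option was given.
	 */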
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op) +
		sizeof(struct cperf_op_result *);
	while (test_burst_size <= ctx->options->max_burst_size) {
		uint64_t ops_enqd = 0, ops_deqd = 0;
		uint64_t m_idx = 0, b_idx = 0;

		uint64_t tsc_val, tsc_end, tsc_start;
		uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
		uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
		uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
		while (enqd_tot < ctx->options->total_ops) {

			uint16_t burst_size = ((enqd_tot + test_burst_size)
					<= ctx->options->total_ops) ?
						test_burst_size :
						ctx->options->total_ops -
						enqd_tot;

			/* Allocate crypto ops from pool */
			if (burst_size != rte_crypto_op_bulk_alloc(
					ctx->crypto_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC,
					ops, burst_size)) {
				RTE_LOG(ERR, USER1,
					"Failed to allocate more crypto "
					"operations from the crypto operation "
					"pool.\nConsider increasing the pool "
					"size with --pool-sz\n");
				return -1;
			}
			/* Setup crypto op, attach mbuf etc */
			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
					&ctx->mbufs_out[m_idx],
					burst_size, ctx->sess, ctx->options,
					ctx->test_vector, iv_offset);
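			/*
			 * The timed region below covers one enqueue and one
			 * (possibly partial) dequeue; a result's latency is
			 * tsc_end at the dequeue that completed it minus the
			 * tsc_start of the burst that enqueued it, so
			 * queueing time inside the device is included.
			 */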
			tsc_start = rte_rdtsc_precise();

#ifdef CPERF_LINEARIZATION_ENABLE
			if (linearize) {
				/* PMD doesn't support scatter-gather and
				 * source buffer is segmented.
				 * We need to linearize it before enqueuing.
				 */
				for (i = 0; i < burst_size; i++)
					rte_pktmbuf_linearize(
						ops[i]->sym->m_src);
			}
#endif /* CPERF_LINEARIZATION_ENABLE */
			/* Enqueue burst of ops on crypto device */
			ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id,
					ctx->qp_id, ops, burst_size);

			/* Dequeue processed burst of ops from crypto device */
			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id,
					ctx->qp_id, ops_processed,
					test_burst_size);

			tsc_end = rte_rdtsc_precise();

			/* Free memory for not enqueued operations */
			if (ops_enqd != burst_size)
				rte_mempool_put_bulk(ctx->crypto_op_pool,
						(void **)&ops[ops_enqd],
						burst_size - ops_enqd);
			for (i = 0; i < ops_enqd; i++) {
				ctx->res[tsc_idx].tsc_start = tsc_start;
				/*
				 * Private data structure starts after the end
				 * of the rte_crypto_sym_op structure.
				 */
				priv_data = (struct priv_op_data *)
						(ops[i]->sym + 1);
				priv_data->result = (void *)&ctx->res[tsc_idx];
				tsc_idx++;
			}
			if (likely(ops_deqd)) {
				/*
				 * Free crypto ops so they can be reused. We
				 * don't free the mbufs here as we don't want
				 * to reuse them as the crypto operation will
				 * change the data and cause failures.
				 */
				for (i = 0; i < ops_deqd; i++)
					store_timestamp(ops_processed[i],
							tsc_end);

				rte_mempool_put_bulk(ctx->crypto_op_pool,
						(void **)ops_processed,
						ops_deqd);

				deqd_tot += ops_deqd;
				deqd_max = max(ops_deqd, deqd_max);
				deqd_min = min(ops_deqd, deqd_min);
			}

			enqd_tot += ops_enqd;
			enqd_max = max(ops_enqd, enqd_max);
			enqd_min = min(ops_enqd, enqd_min);
			/* Rotate through the mbuf pool, wrapping when the
			 * next burst would run past the end.
			 */
			m_idx += ops_enqd;
			m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
					0 : m_idx;
			b_idx++;
		}
		/* Dequeue any operations still in the crypto device */
		while (deqd_tot < ctx->options->total_ops) {
			/* Sending 0 length burst to flush sw crypto device */
			rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
					NULL, 0);

			/* Dequeue burst */
			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id,
					ctx->qp_id, ops_processed,
					test_burst_size);

			tsc_end = rte_rdtsc_precise();

			if (ops_deqd != 0) {
				for (i = 0; i < ops_deqd; i++)
					store_timestamp(ops_processed[i],
							tsc_end);

				rte_mempool_put_bulk(ctx->crypto_op_pool,
						(void **)ops_processed,
						ops_deqd);

				deqd_tot += ops_deqd;
				deqd_max = max(ops_deqd, deqd_max);
				deqd_min = min(ops_deqd, deqd_min);
			}
		}
		for (i = 0; i < tsc_idx; i++) {
			tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
			tsc_max = max(tsc_val, tsc_max);
			tsc_min = min(tsc_val, tsc_min);
			tsc_tot += tsc_val;
		}
		double time_tot, time_avg, time_max, time_min;

		const uint64_t tunit = 1000000; /* us */
		const uint64_t tsc_hz = rte_get_tsc_hz();

		uint64_t enqd_avg = enqd_tot / b_idx;
		uint64_t deqd_avg = deqd_tot / b_idx;
		uint64_t tsc_avg = tsc_tot / tsc_idx;

		time_tot = tunit * (double)(tsc_tot) / tsc_hz;
		time_avg = tunit * (double)(tsc_avg) / tsc_hz;
		time_max = tunit * (double)(tsc_max) / tsc_hz;
		time_min = tunit * (double)(tsc_min) / tsc_hz;
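
		/*
		 * Cycles convert to microseconds as cycles * 1e6 / tsc_hz;
		 * for example, at an assumed TSC of 2.0 GHz, 5000 cycles is
		 * 5000 * 1000000 / 2000000000 = 2.5 us.
		 */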
		if (ctx->options->csv) {
			if (!only_once)
				printf("\n# lcore, Buffer Size, Burst Size, "
						"Packet Seq #, cycles, "
						"time (us)");

			for (i = 0; i < ctx->options->total_ops; i++) {

				printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f",
					ctx->lcore_id,
					ctx->options->test_buffer_size,
					test_burst_size, i + 1,
					ctx->res[i].tsc_end -
						ctx->res[i].tsc_start,
					tunit * (double)(ctx->res[i].tsc_end
						- ctx->res[i].tsc_start)
						/ tsc_hz);
			}
			only_once = 1;
		} else {
			printf("\n# Device %d on lcore %u\n", ctx->dev_id,
					ctx->lcore_id);
543 printf("\n# total operations: %u", ctx->options->total_ops);
544 printf("\n# Buffer size: %u", ctx->options->test_buffer_size);
545 printf("\n# Burst size: %u", test_burst_size);
546 printf("\n# Number of bursts: %"PRIu64,
550 printf("\n# \t Total\t Average\t "
551 "Maximum\t Minimum");
552 printf("\n# enqueued\t%12"PRIu64"\t%10"PRIu64"\t"
553 "%10"PRIu64"\t%10"PRIu64, enqd_tot,
554 enqd_avg, enqd_max, enqd_min);
555 printf("\n# dequeued\t%12"PRIu64"\t%10"PRIu64"\t"
556 "%10"PRIu64"\t%10"PRIu64, deqd_tot,
557 deqd_avg, deqd_max, deqd_min);
558 printf("\n# cycles\t%12"PRIu64"\t%10"PRIu64"\t"
559 "%10"PRIu64"\t%10"PRIu64, tsc_tot,
560 tsc_avg, tsc_max, tsc_min);
561 printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f",
562 time_tot, time_avg, time_max, time_min);
		/* Get next size from range or list */
		if (ctx->options->inc_burst_size != 0)
			test_burst_size += ctx->options->inc_burst_size;
		else {
			if (++burst_size_idx ==
					ctx->options->burst_size_count)
				break;
			test_burst_size =
				ctx->options->burst_size_list[burst_size_idx];
		}
	}

	return 0;
}
void
cperf_latency_test_destructor(void *arg)
{
	struct cperf_latency_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	rte_cryptodev_stop(ctx->dev_id);

	cperf_latency_test_free(ctx, ctx->options->pool_sz);
}