/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_latency.h"
#include "cperf_ops.h"
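
/* Aggregate statistics filled in by the runner and reported by the
 * destructor.
 */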
struct cperf_latency_results {
	uint64_t ops_failed;

	uint64_t enqd_tot;
	uint64_t enqd_max;
	uint64_t enqd_min;

	uint64_t deqd_tot;
	uint64_t deqd_max;
	uint64_t deqd_min;

	uint64_t cycles_tot;
	uint64_t cycles_max;
	uint64_t cycles_min;

	uint64_t burst_num;
	uint64_t num;
};
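
/* Timestamps and completion status recorded for each individual crypto op. */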
struct cperf_op_result {
	uint64_t tsc_start;
	uint64_t tsc_end;
	enum rte_crypto_op_status status;
};
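
/* Per-queue-pair test state shared by the constructor, runner and
 * destructor.
 */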
struct cperf_latency_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pkt_mbuf_pool_in;
	struct rte_mempool *pkt_mbuf_pool_out;
	struct rte_mbuf **mbufs_in;
	struct rte_mbuf **mbufs_out;

	struct rte_mempool *crypto_op_pool;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;
	cperf_verify_crypto_op_t verify_op_output;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
	struct cperf_op_result *res;
	struct cperf_latency_results results;
};
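
/* Running min/max helpers for the burst and cycle statistics. */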
#define max(a, b) ((a) > (b) ? (uint64_t)(a) : (uint64_t)(b))
#define min(a, b) ((a) < (b) ? (uint64_t)(a) : (uint64_t)(b))
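
/* Release everything the constructor allocated: the session, the first
 * mbuf_nb entries of both mbuf arrays, the mempools, the result array and
 * the context itself.
 */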
static void
cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
{
	uint32_t i;

	if (ctx == NULL)
		return;

	if (ctx->sess)
		rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);

	if (ctx->mbufs_in) {
		for (i = 0; i < mbuf_nb; i++)
			rte_pktmbuf_free(ctx->mbufs_in[i]);

		rte_free(ctx->mbufs_in);
	}

	if (ctx->mbufs_out) {
		for (i = 0; i < mbuf_nb; i++) {
			if (ctx->mbufs_out[i] != NULL)
				rte_pktmbuf_free(ctx->mbufs_out[i]);
		}

		rte_free(ctx->mbufs_out);
	}

	if (ctx->pkt_mbuf_pool_in)
		rte_mempool_free(ctx->pkt_mbuf_pool_in);

	if (ctx->pkt_mbuf_pool_out)
		rte_mempool_free(ctx->pkt_mbuf_pool_out);

	if (ctx->crypto_op_pool)
		rte_mempool_free(ctx->crypto_op_pool);

	rte_free(ctx->res);
	rte_free(ctx);
}
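
/* Build a test mbuf of options->buffer_sz bytes split across segments_nb
 * segments, pre-filled with plaintext (encrypt) or ciphertext (decrypt),
 * with room appended for the digest and, for AEAD, the AAD prepended.
 */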
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
		uint32_t segments_nb,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	struct rte_mbuf *mbuf;
	uint32_t segment_sz = options->buffer_sz / segments_nb;
	uint32_t last_sz = options->buffer_sz % segments_nb;
	uint8_t *mbuf_data;
	const uint8_t *test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					test_vector->plaintext.data :
					test_vector->ciphertext.data;

	mbuf = rte_pktmbuf_alloc(mempool);
	if (mbuf == NULL)
		goto error;

	mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
	if (mbuf_data == NULL)
		goto error;

	memcpy(mbuf_data, test_data, segment_sz);
	test_data += segment_sz;
	segments_nb--;

	while (segments_nb) {
		struct rte_mbuf *m = rte_pktmbuf_alloc(mempool);

		if (m == NULL)
			goto error;

		rte_pktmbuf_chain(mbuf, m);

		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
		if (mbuf_data == NULL)
			goto error;

		memcpy(mbuf_data, test_data, segment_sz);
		test_data += segment_sz;
		segments_nb--;
	}

	if (last_sz) {
		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
		if (mbuf_data == NULL)
			goto error;

		memcpy(mbuf_data, test_data, last_sz);
	}

	mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
			options->auth_digest_sz);
	if (mbuf_data == NULL)
		goto error;

	if (options->op_type == CPERF_AEAD) {
		uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
			RTE_ALIGN_CEIL(options->auth_aad_sz, 16));

		if (aead == NULL)
			goto error;

		memcpy(aead, test_vector->aad.data, test_vector->aad.length);
	}

	return mbuf;
error:
	rte_pktmbuf_free(mbuf);
	return NULL;
}
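
/* Set up the per-queue-pair context: crypto session, input (and optional
 * out-of-place output) mbuf pools with pre-populated mbufs, the crypto op
 * pool and the per-op result array.
 */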
void *
cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_latency_ctx *ctx = NULL;
	unsigned int mbuf_idx = 0;
	char pool_name[32] = "";

	ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
	if (ctx->sess == NULL)
		goto err;

	snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
			dev_id);

	ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
			options->pool_sz * options->segments_nb, 0, 0,
			RTE_PKTMBUF_HEADROOM +
			RTE_CACHE_LINE_ROUNDUP(
				(options->buffer_sz / options->segments_nb) +
				(options->buffer_sz % options->segments_nb) +
				options->auth_digest_sz),
			rte_socket_id());
	if (ctx->pkt_mbuf_pool_in == NULL)
		goto err;

	/* The mbuf array is walked in whole bursts, so the pool size must
	 * be a multiple of the burst size.
	 */
	if (ctx->options->pool_sz % ctx->options->burst_sz)
		goto err;

	/* Generate mbufs_in with plaintext populated for test */
	ctx->mbufs_in = rte_malloc(NULL,
			(sizeof(struct rte_mbuf *) *
			ctx->options->pool_sz), 0);
	if (ctx->mbufs_in == NULL)
		goto err;

	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
		ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
				ctx->pkt_mbuf_pool_in, options->segments_nb,
				options, test_vector);
		if (ctx->mbufs_in[mbuf_idx] == NULL)
			goto err;
	}

	if (options->out_of_place == 1) {
		snprintf(pool_name, sizeof(pool_name),
				"cperf_pool_out_cdev_%d",
				dev_id);

		ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
				pool_name, options->pool_sz, 0, 0,
				RTE_PKTMBUF_HEADROOM +
				RTE_CACHE_LINE_ROUNDUP(
					options->buffer_sz +
					options->auth_digest_sz),
				rte_socket_id());
		if (ctx->pkt_mbuf_pool_out == NULL)
			goto err;
	}

	ctx->mbufs_out = rte_malloc(NULL,
			(sizeof(struct rte_mbuf *) *
			ctx->options->pool_sz), 0);
	if (ctx->mbufs_out == NULL)
		goto err;

	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
		if (options->out_of_place == 1) {
			ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
					ctx->pkt_mbuf_pool_out, 1,
					options, test_vector);
			if (ctx->mbufs_out[mbuf_idx] == NULL)
				goto err;
		} else {
			ctx->mbufs_out[mbuf_idx] = NULL;
		}
	}

	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
			dev_id);

	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
			rte_socket_id());
	if (ctx->crypto_op_pool == NULL)
		goto err;

	ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
			ctx->options->total_ops, 0);
	if (ctx->res == NULL)
		goto err;

	return ctx;
err:
	cperf_latency_test_free(ctx, mbuf_idx);
	return NULL;
}
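
/* Flatten the mbuf chain into one contiguous buffer, then compare the
 * cipher and/or digest regions against the expected test vector contents.
 * Returns non-zero on mismatch.
 */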
static int
cperf_latency_test_verifier(struct rte_mbuf *mbuf,
		const struct cperf_options *options,
		const struct cperf_test_vector *vector)
{
	const struct rte_mbuf *m;
	uint32_t len = 0;
	uint16_t nb_segs;
	uint8_t *data;
	uint32_t cipher_offset = 0, auth_offset = 0;
	uint8_t cipher = 0, auth = 0;
	int res = 0;

	/* Add up the segment lengths, then copy the chain into one buffer */
	m = mbuf;
	nb_segs = m->nb_segs;
	while (m && nb_segs != 0) {
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	data = rte_malloc(NULL, len, 0);
	if (data == NULL)
		return 1;

	m = mbuf;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
				m->data_len);
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	switch (options->op_type) {
	case CPERF_CIPHER_ONLY:
		cipher = 1;
		break;
	case CPERF_CIPHER_THEN_AUTH:
	case CPERF_AUTH_THEN_CIPHER:
		cipher = 1;
		auth = 1;
		auth_offset = vector->plaintext.length;
		break;
	case CPERF_AUTH_ONLY:
		auth = 1;
		auth_offset = vector->plaintext.length;
		break;
	case CPERF_AEAD:
		cipher = 1;
		cipher_offset = vector->aad.length;
		auth = 1;
		auth_offset = vector->aad.length + vector->plaintext.length;
		break;
	}

	if (cipher == 1) {
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			res += memcmp(data + cipher_offset,
					vector->ciphertext.data,
					vector->ciphertext.length);
		else
			res += memcmp(data + cipher_offset,
					vector->plaintext.data,
					vector->plaintext.length);
	}

	if (auth == 1) {
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
			res += memcmp(data + auth_offset,
					vector->digest.data,
					vector->digest.length);
	}

	rte_free(data);

	return !!res;
}
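
/* Worker loop: enqueue/dequeue bursts of crypto ops, timestamping each op
 * on enqueue and on dequeue via the opaque_data back-pointer into ctx->res,
 * then drain the device and collect latency statistics.
 */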
int
cperf_latency_test_runner(void *arg)
{
	struct cperf_latency_ctx *ctx = arg;
	struct cperf_op_result *pres;

	if (ctx == NULL)
		return 0;

	struct rte_crypto_op *ops[ctx->options->burst_sz];
	struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
	uint64_t ops_enqd = 0, ops_deqd = 0;
	uint16_t ops_unused = 0;
	uint64_t m_idx = 0, b_idx = 0, i;

	uint64_t tsc_val, tsc_end, tsc_start;
	uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
	uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
	uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segments_nb > 1) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	/* Warm up the host CPU before starting the test */
	for (i = 0; i < ctx->options->total_ops; i++)
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

	while (enqd_tot < ctx->options->total_ops) {

		uint16_t burst_size = ((enqd_tot + ctx->options->burst_sz)
				<= ctx->options->total_ops) ?
						ctx->options->burst_sz :
						ctx->options->total_ops -
						enqd_tot;
		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate crypto ops from pool */
		if (ops_needed != rte_crypto_op_bulk_alloc(
				ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, ops_needed))
			return -1;

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
				&ctx->mbufs_out[m_idx],
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector);

		tsc_start = rte_rdtsc_precise();

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);

		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->burst_sz);

		tsc_end = rte_rdtsc_precise();

		/* Tag each op with its result slot so the dequeue side can
		 * record the completion timestamp and status.
		 */
		for (i = 0; i < ops_needed; i++) {
			ctx->res[tsc_idx].tsc_start = tsc_start;
			ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
			tsc_idx++;
		}

		/*
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;

		if (likely(ops_deqd)) {
			/*
			 * free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them as
			 * the crypto operation will change the data and cause
			 * failures.
			 */
			for (i = 0; i < ops_deqd; i++) {
				pres = (struct cperf_op_result *)
						(ops_processed[i]->opaque_data);
				pres->status = ops_processed[i]->status;
				pres->tsc_end = tsc_end;

				rte_crypto_op_free(ops_processed[i]);
			}

			deqd_tot += ops_deqd;
			deqd_max = max(ops_deqd, deqd_max);
			deqd_min = min(ops_deqd, deqd_min);
		}

		enqd_tot += ops_enqd;
		enqd_max = max(ops_enqd, enqd_max);
		enqd_min = min(ops_enqd, enqd_min);

		m_idx += ops_needed;
		m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
				0 : m_idx;
		b_idx++;
	}

	/* Dequeue any operations still in the crypto device */
	while (deqd_tot < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->burst_sz);

		tsc_end = rte_rdtsc_precise();

		if (ops_deqd != 0) {
			for (i = 0; i < ops_deqd; i++) {
				pres = (struct cperf_op_result *)
						(ops_processed[i]->opaque_data);
				pres->status = ops_processed[i]->status;
				pres->tsc_end = tsc_end;

				rte_crypto_op_free(ops_processed[i]);
			}

			deqd_tot += ops_deqd;
			deqd_max = max(ops_deqd, deqd_max);
			deqd_min = min(ops_deqd, deqd_min);
		}
	}

	for (i = 0; i < tsc_idx; i++) {
		tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
		tsc_max = max(tsc_val, tsc_max);
		tsc_min = min(tsc_val, tsc_min);
		tsc_tot += tsc_val;
	}

	if (ctx->options->verify) {
		struct rte_mbuf **mbufs;

		if (ctx->options->out_of_place == 1)
			mbufs = ctx->mbufs_out;
		else
			mbufs = ctx->mbufs_in;

		/* The mbuf array wraps at pool_sz, so index modulo the pool
		 * size to stay in bounds when total_ops exceeds it.
		 */
		for (i = 0; i < ctx->options->total_ops; i++) {
			if (ctx->res[i].status != RTE_CRYPTO_OP_STATUS_SUCCESS
					|| cperf_latency_test_verifier(
						mbufs[i % ctx->options->pool_sz],
						ctx->options,
						ctx->test_vector))
				ctx->results.ops_failed++;
		}
	}

	ctx->results.enqd_tot = enqd_tot;
	ctx->results.enqd_max = enqd_max;
	ctx->results.enqd_min = enqd_min;

	ctx->results.deqd_tot = deqd_tot;
	ctx->results.deqd_max = deqd_max;
	ctx->results.deqd_min = deqd_min;

	ctx->results.cycles_tot = tsc_tot;
	ctx->results.cycles_max = tsc_max;
	ctx->results.cycles_min = tsc_min;

	ctx->results.burst_num = b_idx;
	ctx->results.num = tsc_idx;

	return 0;
}
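
/* Report per-op latencies (CSV mode) or a summary table, then free the
 * test context.
 */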
void
cperf_latency_test_destructor(void *arg)
{
	struct cperf_latency_ctx *ctx = arg;
	static int only_once;
	uint64_t i;
	uint64_t etot, eavg, emax, emin;
	uint64_t dtot, davg, dmax, dmin;
	uint64_t ctot, cavg, cmax, cmin;
	double ttot, tavg, tmax, tmin;

	const uint64_t tunit = 1000000; /* us */
	const uint64_t tsc_hz = rte_get_tsc_hz();

	if (ctx == NULL)
		return;

	etot = ctx->results.enqd_tot;
	eavg = ctx->results.enqd_tot / ctx->results.burst_num;
	emax = ctx->results.enqd_max;
	emin = ctx->results.enqd_min;

	dtot = ctx->results.deqd_tot;
	davg = ctx->results.deqd_tot / ctx->results.burst_num;
	dmax = ctx->results.deqd_max;
	dmin = ctx->results.deqd_min;

	ctot = ctx->results.cycles_tot;
	cavg = ctx->results.cycles_tot / ctx->results.num;
	cmax = ctx->results.cycles_max;
	cmin = ctx->results.cycles_min;

	/* Convert cycle counts to microseconds */
	ttot = tunit * (double)(ctot) / tsc_hz;
	tavg = tunit * (double)(cavg) / tsc_hz;
	tmax = tunit * (double)(cmax) / tsc_hz;
	tmin = tunit * (double)(cmin) / tsc_hz;

	if (ctx->options->csv) {
		if (!only_once)
			printf("\n# lcore; Pkt Seq #; Packet Size; cycles;"
					" time (us)");

		for (i = 0; i < ctx->options->total_ops; i++) {
			printf("\n%u;%"PRIu64";%u;%"PRIu64";%.3f",
				ctx->lcore_id, i + 1,
				ctx->options->buffer_sz,
				ctx->res[i].tsc_end - ctx->res[i].tsc_start,
				tunit * (double) (ctx->res[i].tsc_end
						- ctx->res[i].tsc_start)
					/ tsc_hz);
		}
		only_once = 1;
	} else {
		printf("\n# Device %d on lcore %u\n", ctx->dev_id,
				ctx->lcore_id);
		printf("\n# total operations: %u", ctx->options->total_ops);
		printf("\n# verification failed: %"PRIu64,
				ctx->results.ops_failed);
		printf("\n# burst number: %"PRIu64,
				ctx->results.burst_num);
		printf("\n#");
		printf("\n#          \t       Total\t   Average\t   Maximum\t"
				"   Minimum");
		printf("\n# enqueued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
				"%10"PRIu64, etot, eavg, emax, emin);
		printf("\n# dequeued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
				"%10"PRIu64, dtot, davg, dmax, dmin);
		printf("\n# cycles\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
				"%10"PRIu64, ctot, cavg, cmax, cmin);
		printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f", ttot,
				tavg, tmax, tmin);
		printf("\n\n");
	}

	cperf_latency_test_free(ctx, ctx->options->pool_sz);
}