/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_verify.h"
#include "cperf_ops.h"
#include "cperf_test_common.h"
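
/*
 * Verify test: each burst of operations is built from the test vector,
 * run through the crypto device, and the processed buffers (and generated
 * digests) are compared back against the expected vector data.
 */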

struct cperf_verify_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pool;
	struct rte_cryptodev_sym_session *sess;
	cperf_populate_ops_t populate_ops;

	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};

struct cperf_op_result {
	enum rte_crypto_op_status status;
};
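
/* Release everything owned by a verify test context: the session, the
 * crypto op/mbuf pool and the context itself.
 */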
static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
	if (ctx == NULL)
		return;

	if (ctx->sess != NULL) {
		rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
		rte_cryptodev_sym_session_free(ctx->sess);
	}

	rte_mempool_free(ctx->pool);
	rte_free(ctx);
}
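
/* Create the per-(device, queue pair) verify test context: create the
 * session via the operation-specific callbacks and allocate the common
 * crypto op/mbuf pool.
 */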
void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_verify_ctx *ctx = NULL;

	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
			iv_offset);
	if (ctx->sess == NULL)
		goto err;

	if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
			&ctx->src_buf_offset, &ctx->dst_buf_offset,
			&ctx->pool) < 0)
		goto err;

	return ctx;
err:
	cperf_verify_test_free(ctx);

	return NULL;
}
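
/* Compare a processed operation against the test vector. The (possibly
 * segmented) output buffer is copied into a contiguous scratch buffer and
 * compared against the expected ciphertext/plaintext and, where applicable,
 * the expected digest. Returns non-zero on mismatch.
 */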
static int
cperf_verify_op(struct rte_crypto_op *op,
		const struct cperf_options *options,
		const struct cperf_test_vector *vector)
{
	const struct rte_mbuf *m;
	uint32_t len;
	uint16_t nb_segs;
	uint8_t *data;
	uint32_t cipher_offset, auth_offset;
	uint8_t cipher, auth;
	int res = 0;

	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		return 1;

	/* Read back from m_dst if an out-of-place buffer was used */
	m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	data = rte_malloc(NULL, len, 0);
	if (data == NULL)
		return 1;

	/* Linearize the segmented buffer into 'data' for comparison */
	m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
				m->data_len);
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	switch (options->op_type) {
	case CPERF_CIPHER_ONLY:
		cipher = 1;
		cipher_offset = 0;
		auth = 0;
		auth_offset = 0;
		break;
	case CPERF_CIPHER_THEN_AUTH:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_ONLY:
		cipher = 0;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_THEN_CIPHER:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AEAD:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	default:
		/* Unknown operation type: treat as a verification failure */
		rte_free(data);
		return 1;
	}

	if (cipher == 1) {
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			res += memcmp(data + cipher_offset,
					vector->ciphertext.data,
					options->test_buffer_size);
		else
			res += memcmp(data + cipher_offset,
					vector->plaintext.data,
					options->test_buffer_size);
	}

	if (auth == 1) {
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
			res += memcmp(data + auth_offset,
					vector->digest.data,
					options->digest_sz);
	}

	rte_free(data);

	return !!res;
}
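
/* Refill the source mbuf chain with the expected input data (plaintext for
 * encrypt, ciphertext for decrypt), one segment at a time, so the output
 * can be verified after the operation completes.
 */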
static void
cperf_mbuf_set(struct rte_mbuf *mbuf,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint32_t segment_sz = options->segment_sz;
	uint8_t *mbuf_data;
	uint8_t *test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					test_vector->plaintext.data :
					test_vector->ciphertext.data;
	uint32_t remaining_bytes = options->max_buffer_size;

	while (remaining_bytes) {
		mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);

		if (remaining_bytes <= segment_sz) {
			memcpy(mbuf_data, test_data, remaining_bytes);
			return;
		}

		memcpy(mbuf_data, test_data, segment_sz);
		remaining_bytes -= segment_sz;
		test_data += segment_sz;
		mbuf = mbuf->next;
	}
}
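
/* Main worker loop: enqueue bursts of crypto operations, dequeue the
 * processed ones, verify each result against the test vector and report
 * the enqueue/dequeue/verification statistics at the end.
 */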
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static int only_once;

	uint64_t i;
	uint16_t ops_unused = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);

		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;

		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}

		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}

		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
				(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}

void
cperf_verify_test_destructor(void *arg)
{
	struct cperf_verify_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	cperf_verify_test_free(ctx);
}