From: Pablo de Lara
Date: Sun, 2 Jul 2017 05:41:10 +0000 (+0100)
Subject: app/crypto-perf: move IV to crypto op private data
X-Git-Tag: spdx-start~2690
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=f8b7fdd38917833e9c0f67d4ebdeb3b3a0162ef5;p=dpdk.git

app/crypto-perf: move IV to crypto op private data

Usually, the IV changes for each crypto operation. Therefore, instead of
pointing all operations at the same location, the IV is now copied into
each crypto operation.

This allows the IV to be passed as an offset from the beginning of the
crypto operation, instead of as a pointer.

Signed-off-by: Pablo de Lara
Acked-by: Declan Doherty
Acked-by: Akhil Goyal
Acked-by: Fiona Trahe
---

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 17df2eb469..0f45a3c8fd 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -40,7 +40,8 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector __rte_unused)
+		const struct cperf_test_vector *test_vector __rte_unused,
+		uint16_t iv_offset __rte_unused)
 {
 	uint16_t i;
 
@@ -65,7 +66,8 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector __rte_unused)
+		const struct cperf_test_vector *test_vector __rte_unused,
+		uint16_t iv_offset __rte_unused)
 {
 	uint16_t i;
 
@@ -90,7 +92,8 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset)
 {
 	uint16_t i;
 
@@ -103,8 +106,10 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.iv.data = test_vector->iv.data;
-		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+		sym_op->cipher.iv.data = rte_crypto_op_ctod_offset(ops[i],
+					uint8_t *, iv_offset);
+		sym_op->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+					iv_offset);
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@@ -117,6 +122,13 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 			sym_op->cipher.data.offset = 0;
 	}
 
+	if (options->test == CPERF_TEST_TYPE_VERIFY) {
+		for (i = 0; i < nb_ops; i++)
+			memcpy(ops[i]->sym->cipher.iv.data,
+				test_vector->iv.data,
+				test_vector->iv.length);
+	}
+
 	return 0;
 }
 
@@ -125,7 +137,8 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset __rte_unused)
 {
 	uint16_t i;
 
@@ -189,7 +202,8 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset)
 {
 	uint16_t i;
 
@@ -202,8 +216,10 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.iv.data = test_vector->iv.data;
-		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+		sym_op->cipher.iv.data = rte_crypto_op_ctod_offset(ops[i],
+					uint8_t *, iv_offset);
+		sym_op->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+					iv_offset);
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@@ -258,6 +274,13 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 			sym_op->auth.data.offset = 0;
 	}
 
+	if (options->test == CPERF_TEST_TYPE_VERIFY) {
+		for (i = 0; i < nb_ops; i++)
+			memcpy(ops[i]->sym->cipher.iv.data,
+				test_vector->iv.data,
+				test_vector->iv.length);
+	}
+
 	return 0;
 }
 
@@ -266,7 +289,8 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector)
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset)
 {
 	uint16_t i;
 
@@ -279,8 +303,10 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.iv.data = test_vector->iv.data;
-		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+		sym_op->cipher.iv.data = rte_crypto_op_ctod_offset(ops[i],
+					uint8_t *, iv_offset);
+		sym_op->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+					iv_offset);
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
 		sym_op->cipher.data.length = options->test_buffer_size;
@@ -327,6 +353,13 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 			sym_op->auth.data.offset = options->auth_aad_sz;
 	}
 
+	if (options->test == CPERF_TEST_TYPE_VERIFY) {
+		for (i = 0; i < nb_ops; i++)
+			memcpy(ops[i]->sym->cipher.iv.data,
+				test_vector->iv.data,
+				test_vector->iv.length);
+	}
+
 	return 0;
 }
 
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 1b748daf0a..f7b431c721 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -48,7 +48,8 @@ typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
 		struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
-		const struct cperf_test_vector *test_vector);
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset);
 
 struct cperf_op_fns {
 	cperf_sessions_create_t sess_create;
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 32cf5fdd00..c33129ba11 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -280,7 +280,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
 	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
 			dev_id);
 
-	uint16_t priv_size = sizeof(struct priv_op_data);
+	uint16_t priv_size = sizeof(struct priv_op_data) + test_vector->iv.length;
 	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
 			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 512, priv_size,
 			rte_socket_id());
@@ -355,6 +355,10 @@ cperf_latency_test_runner(void *arg)
 	else
 		test_burst_size = ctx->options->burst_size_list[0];
 
+	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+		sizeof(struct rte_crypto_sym_op) +
+		sizeof(struct cperf_op_result *);
+
 	while (test_burst_size <= ctx->options->max_burst_size) {
 		uint64_t ops_enqd = 0, ops_deqd = 0;
 		uint64_t m_idx = 0, b_idx = 0;
@@ -383,7 +387,7 @@ cperf_latency_test_runner(void *arg)
 			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
 					&ctx->mbufs_out[m_idx], burst_size,
 					ctx->sess, ctx->options,
-					ctx->test_vector);
+					ctx->test_vector, iv_offset);
 
 			tsc_start = rte_rdtsc_precise();
 
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 85947a53da..5a90eb0cb2 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -262,9 +262,11 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
 	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
 			dev_id);
 
+	uint16_t priv_size = test_vector->iv.length;
+
 	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
-			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 512, 0,
-			rte_socket_id());
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+			512, priv_size, rte_socket_id());
 	if (ctx->crypto_op_pool == NULL)
 		goto err;
 
@@ -315,6 +317,9 @@ cperf_throughput_test_runner(void *test_ctx)
 	else
 		test_burst_size = ctx->options->burst_size_list[0];
 
+	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+		sizeof(struct rte_crypto_sym_op);
+
 	while (test_burst_size <= ctx->options->max_burst_size) {
 		uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
 		uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
@@ -346,7 +351,7 @@ cperf_throughput_test_runner(void *test_ctx)
 			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
 					&ctx->mbufs_out[m_idx], ops_needed,
 					ctx->sess, ctx->options,
-					ctx->test_vector);
+					ctx->test_vector, iv_offset);
 
 			/**
 			 * When ops_needed is smaller than ops_enqd, the
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index f384e3d92b..404f899bce 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -1,3 +1,34 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 #ifdef RTE_EXEC_ENV_BSDAPP
 #define _WITH_GETLINE
 #endif
@@ -303,7 +334,6 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
 	} else if (strstr(key_token, "iv")) {
 		rte_free(vector->iv.data);
 		vector->iv.data = data;
-		vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
 		if (tc_found)
 			vector->iv.length = data_length;
 		else {
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 757957f78c..36b3f6f128 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -423,7 +423,6 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 			memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
 		}
 		t_vec->ciphertext.length = options->max_buffer_size;
-		t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
 		t_vec->iv.length = options->cipher_iv_sz;
 		t_vec->data.cipher_offset = 0;
 		t_vec->data.cipher_length = options->max_buffer_size;
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index b19f5e1369..be684a63ec 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -266,9 +266,10 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
 	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
 			dev_id);
 
+	uint16_t priv_size = test_vector->iv.length;
 	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
-			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 512, 0,
-			rte_socket_id());
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+			512, priv_size, rte_socket_id());
 	if (ctx->crypto_op_pool == NULL)
 		goto err;
 
@@ -417,6 +418,9 @@ cperf_verify_test_runner(void *test_ctx)
 	printf("\n# Running verify test on device: %u, lcore: %u\n",
 			ctx->dev_id, lcore);
 
+	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+		sizeof(struct rte_crypto_sym_op);
+
 	while (ops_enqd_total < ctx->options->total_ops) {
 
 		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
@@ -438,7 +442,7 @@ cperf_verify_test_runner(void *test_ctx)
 		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
 				&ctx->mbufs_out[m_idx], ops_needed,
 				ctx->sess, ctx->options,
-				ctx->test_vector);
+				ctx->test_vector, iv_offset);
 
 #ifdef CPERF_LINEARIZATION_ENABLE
 		if (linearize) {
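
For reference, the access pattern introduced above places the IV in the crypto op's private data area, immediately after struct rte_crypto_op and struct rte_crypto_sym_op (plus the per-op result pointer in the latency test), and reaches it through an offset instead of a stored pointer. The following is a minimal sketch of that pattern, not code from this patch: set_iv_from_offset() is a hypothetical helper, and it assumes the op mempool was created with priv_size large enough to hold the IV, as the constructors above now ensure.

#include <string.h>
#include <rte_crypto.h>

/* Hypothetical helper: point the cipher parameters of one op at the IV
 * stored in its private data area, and copy the test-vector IV there,
 * mirroring what cperf_set_ops_cipher() does in this patch. */
static void
set_iv_from_offset(struct rte_crypto_op *op, uint16_t iv_offset,
		const uint8_t *iv, uint16_t iv_length)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);

	op->sym->cipher.iv.data = iv_ptr;
	op->sym->cipher.iv.phys_addr = rte_crypto_op_ctophys_offset(op, iv_offset);
	op->sym->cipher.iv.length = iv_length;

	/* Copying the IV contents only matters when they are checked,
	 * e.g. in the verify test. */
	memcpy(iv_ptr, iv, iv_length);
}

For the throughput and verify tests the offset is simply
sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op); the latency
test adds sizeof(struct cperf_op_result *) for its per-op result pointer.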