diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index f76dbdd3c7..f59568b801 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -41,7 +13,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector __rte_unused,
-		uint16_t iv_offset __rte_unused)
+		uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -62,7 +34,12 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* cipher parameters */
-		sym_op->cipher.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -75,7 +52,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector __rte_unused,
-		uint16_t iv_offset __rte_unused)
+		uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -96,7 +73,12 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* auth parameters */
-		sym_op->auth.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -109,7 +91,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -130,12 +112,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* cipher parameters */
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
+
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
-			sym_op->cipher.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->cipher.data.length = options->test_buffer_size;
+			sym_op->cipher.data.length <<= 3;
 
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -160,7 +147,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -221,16 +208,21 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
 					uint8_t *, offset);
 			sym_op->auth.digest.phys_addr =
-					rte_pktmbuf_mtophys_offset(buf, offset);
+					rte_pktmbuf_iova_offset(buf, offset);
 		}
 
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
+
 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
-			sym_op->auth.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->auth.data.length = options->test_buffer_size;
+			sym_op->auth.data.length <<= 3;
 
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -255,7 +247,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -276,12 +268,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* cipher parameters */
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
+
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
-			sym_op->cipher.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->cipher.data.length = options->test_buffer_size;
+			sym_op->cipher.data.length <<= 3;
 
 		sym_op->cipher.data.offset = 0;
 
@@ -318,15 +315,20 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
 					uint8_t *, offset);
 			sym_op->auth.digest.phys_addr =
-					rte_pktmbuf_mtophys_offset(buf, offset);
+					rte_pktmbuf_iova_offset(buf, offset);
 		}
 
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
+
 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
-			sym_op->auth.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->auth.data.length = options->test_buffer_size;
+			sym_op->auth.data.length <<= 3;
 
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -360,9 +362,10 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
+	/* AAD is placed after the IV */
 	uint16_t aad_offset = iv_offset +
 			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);
 
@@ -383,7 +386,12 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* AEAD parameters */
-		sym_op->aead.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->aead.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->aead.data.length = options->test_buffer_size;
 		sym_op->aead.data.offset = 0;
 
 		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
@@ -424,7 +432,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
 					uint8_t *, offset);
 			sym_op->aead.digest.phys_addr =
-					rte_pktmbuf_mtophys_offset(buf, offset);
+					rte_pktmbuf_iova_offset(buf, offset);
 		}
 	}
 
@@ -433,13 +441,26 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
 					uint8_t *, iv_offset);
 
-			memcpy(iv_ptr, test_vector->aead_iv.data,
+			/*
+			 * If doing AES-CCM, nonce is copied one byte
+			 * after the start of IV field, and AAD is copied
+			 * 18 bytes after the start of the AAD field.
+			 */
+			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
+				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
 					test_vector->aead_iv.length);
-			/* Copy AAD after the IV */
-			memcpy(ops[i]->sym->aead.aad.data,
-				test_vector->aad.data,
-				test_vector->aad.length);
+				memcpy(ops[i]->sym->aead.aad.data + 18,
+					test_vector->aad.data,
+					test_vector->aad.length);
+			} else {
+				memcpy(iv_ptr, test_vector->aead_iv.data,
+						test_vector->aead_iv.length);
+
+				memcpy(ops[i]->sym->aead.aad.data,
+					test_vector->aad.data,
+					test_vector->aad.length);
+			}
 		}
 	}
 
@@ -448,6 +469,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 
 static struct rte_cryptodev_sym_session *
 cperf_create_session(struct rte_mempool *sess_mp,
+		struct rte_mempool *priv_mp,
 		uint8_t dev_id,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
@@ -484,7 +506,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		}
 		/* create crypto session */
 		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
-				sess_mp);
+				priv_mp);
 	/*
 	 *  auth only
 	 */
@@ -493,6 +515,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		auth_xform.next = NULL;
 		auth_xform.auth.algo = options->auth_algo;
 		auth_xform.auth.op = options->auth_op;
+		auth_xform.auth.iv.offset = iv_offset;
 
 		/* auth different than null */
 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
@@ -511,7 +534,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		}
 		/* create crypto session */
 		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
-				sess_mp);
+				priv_mp);
 	/*
 	 * cipher and auth
 	 */
@@ -547,6 +570,8 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		auth_xform.next = NULL;
 		auth_xform.auth.algo = options->auth_algo;
 		auth_xform.auth.op = options->auth_op;
+		auth_xform.auth.iv.offset = iv_offset +
+				cipher_xform.cipher.iv.length;
 
 		/* auth different than null */
 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
@@ -568,12 +593,12 @@ cperf_create_session(struct rte_mempool *sess_mp,
 			cipher_xform.next = &auth_xform;
 			/* create crypto session */
 			rte_cryptodev_sym_session_init(dev_id,
-					sess, &cipher_xform, sess_mp);
+					sess, &cipher_xform, priv_mp);
 		} else { /* auth then cipher */
 			auth_xform.next = &cipher_xform;
 			/* create crypto session */
 			rte_cryptodev_sym_session_init(dev_id,
-					sess, &auth_xform, sess_mp);
+					sess, &auth_xform, priv_mp);
 		}
 	} else { /* options->op_type == CPERF_AEAD */
 		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
@@ -594,7 +619,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 
 		/* Create crypto session */
 		rte_cryptodev_sym_session_init(dev_id,
-				sess, &aead_xform, sess_mp);
+				sess, &aead_xform, priv_mp);
 	}
 
 	return sess;
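
A few notes on the hunks above. The new uint32_t *imix_idx parameter threads a shared cursor through every op-population callback: when an IMIX distribution is configured (options->imix_distribution_count is non-zero), each operation takes its payload length from the precomputed options->imix_buffer_sizes table, and the cursor advances round-robin, wrapping modulo options->pool_sz. A minimal standalone sketch of that selection logic, with hypothetical names (pick_imix_len, IMIX_POOL_SZ) standing in for the cperf fields:

#include <stdint.h>
#include <stdio.h>

#define IMIX_POOL_SZ 8	/* stands in for options->pool_sz */

/* Per-op payload lengths, as options->imix_buffer_sizes would be
 * filled out from the requested distribution. */
static const uint32_t imix_buffer_sizes[IMIX_POOL_SZ] = {
	64, 64, 64, 512, 512, 1518, 1518, 1518
};

/* Mirror of: length = imix_buffer_sizes[*imix_idx];
 *            *imix_idx = (*imix_idx + 1) % pool_sz; */
static uint32_t
pick_imix_len(uint32_t *imix_idx)
{
	uint32_t len = imix_buffer_sizes[*imix_idx];

	*imix_idx = (*imix_idx + 1) % IMIX_POOL_SZ;
	return len;
}

int
main(void)
{
	uint32_t idx = 0;
	int i;

	for (i = 0; i < 10; i++)	/* wraps after IMIX_POOL_SZ ops */
		printf("op %d: %u bytes\n", i, pick_imix_len(&idx));
	return 0;
}

Keeping the cursor in the caller, rather than in a function-local variable, is what lets successive bursts of operations walk the distribution continuously instead of restarting it on every call.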
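
The sym_op->cipher.data.length <<= 3; rewrite is behavior-preserving: for SNOW3G, KASUMI and ZUC the cryptodev API expresses data.length in bits, so the code now picks the byte count once (fixed size or IMIX) and converts it in place, instead of duplicating the assignment in both branches of an if/else. A trivial sketch of the collapsed logic (hypothetical helper name, not from the patch):

#include <assert.h>
#include <stdint.h>

/* Before: two assignments, one per branch.  After: pick bytes once,
 * then shift for the bit-oriented wireless algorithms. */
static uint32_t
op_length(uint32_t len_bytes, int algo_counts_bits)
{
	uint32_t len = len_bytes;	/* fixed size or IMIX pick */

	if (algo_counts_bits)
		len <<= 3;		/* bytes -> bits (x8) */
	return len;
}

int
main(void)
{
	assert(op_length(1024, 0) == 1024);
	assert(op_length(1024, 1) == 8192);
	return 0;
}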
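
The iv_ptr + 1 and aad.data + 18 offsets in the AES-CCM branch follow how the cryptodev API lays out CCM inputs: byte 0 of the IV field is left free for the CCM flags byte, so the nonce starts at offset 1, and the start of the AAD buffer is reserved for the 16-byte B_0 block plus the 2-byte encoded AAD length, so the AAD proper starts at offset 18. A plain-C sketch of that placement (buffer and function names here are illustrative, not from the patch):

#include <stdint.h>
#include <string.h>

#define CCM_FLAGS_LEN    1	/* byte 0 of the IV field: CCM flags */
#define CCM_B0_LEN      16	/* first CCM block, written by the driver */
#define CCM_AAD_LEN_LEN  2	/* encoded AAD length (AAD < 2^16 - 2^8) */
#define CCM_AAD_OFFSET  (CCM_B0_LEN + CCM_AAD_LEN_LEN)	/* == 18 */

/* Place nonce and AAD the way the patched AES-CCM branch does,
 * leaving the reserved prefixes untouched for the driver to fill. */
static void
ccm_place(uint8_t *iv_field, const uint8_t *nonce, size_t nonce_len,
		uint8_t *aad_field, const uint8_t *aad, size_t aad_len)
{
	memcpy(iv_field + CCM_FLAGS_LEN, nonce, nonce_len);
	memcpy(aad_field + CCM_AAD_OFFSET, aad, aad_len);
}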
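
Finally, the new priv_mp argument to cperf_create_session() separates the mempool backing the rte_cryptodev_sym_session headers from the one holding each device's private session data: rte_cryptodev_sym_session_create() allocates from the former, rte_cryptodev_sym_session_init() from the latter, which is why every _init() call site in the diff switches from sess_mp to priv_mp. A hedged sketch of that two-step sequence against the cryptodev API of this DPDK generation (error handling trimmed, xform assumed already populated):

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
create_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp, struct rte_mempool *priv_mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Session header comes from sess_mp... */
	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* ...device-private session material from priv_mp. */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}

Splitting the two pools lets the header pool be shared across devices while each device's private data is sized to its own session requirements.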