X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-crypto-perf%2Fcperf_ops.c;h=97584ceedbc70be3234e127433a2bb2c603ba75a;hb=2adb3b4e7e54dcce626560da39b6fb6adfe914bc;hp=bc6b24fc221b17ac28eb832083912fcdb075461c;hpb=7df074333930f84a0d7f855c5a9f97524a6606c5;p=dpdk.git

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index bc6b24fc22..97584ceedb 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -35,13 +7,51 @@
 #include "cperf_ops.h"
 #include "cperf_test_vectors.h"
 
+#ifdef RTE_LIBRTE_SECURITY
+static int
+cperf_set_ops_security(struct rte_crypto_op **ops,
+		uint32_t src_buf_offset __rte_unused,
+		uint32_t dst_buf_offset __rte_unused,
+		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+		const struct cperf_options *options __rte_unused,
+		const struct cperf_test_vector *test_vector __rte_unused,
+		uint16_t iv_offset __rte_unused,
+		uint32_t *imix_idx __rte_unused)
+{
+	uint16_t i;
+
+	for (i = 0; i < nb_ops; i++) {
+		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+		struct rte_security_session *sec_sess =
+			(struct rte_security_session *)sess;
+
+		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		rte_security_attach_session(ops[i], sec_sess);
+		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+							src_buf_offset);
+		sym_op->m_src->buf_len = options->segment_sz;
+		sym_op->m_src->data_len = options->test_buffer_size;
+		sym_op->m_src->pkt_len = sym_op->m_src->data_len;
+
+		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
+		if (dst_buf_offset == 0)
+			sym_op->m_dst = NULL;
+		else
+			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+							dst_buf_offset);
+	}
+
+	return 0;
+}
+#endif
+
 static int
 cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector __rte_unused,
-		uint16_t iv_offset __rte_unused)
+		uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -62,7 +72,12 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* cipher parameters */
-		sym_op->cipher.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -75,7 +90,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector __rte_unused,
-		uint16_t iv_offset __rte_unused)
+		uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -96,7 +111,12 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* auth parameters */
-		sym_op->auth.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -109,7 +129,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -130,12 +150,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* cipher parameters */
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
+
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
-			sym_op->cipher.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->cipher.data.length = options->test_buffer_size;
+			sym_op->cipher.data.length <<= 3;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -160,7 +185,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -221,16 +246,21 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
 					uint8_t *, offset);
 			sym_op->auth.digest.phys_addr =
-					rte_pktmbuf_mtophys_offset(buf, offset);
+					rte_pktmbuf_iova_offset(buf, offset);
 
 		}
 
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
+
 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
-			sym_op->auth.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->auth.data.length = options->test_buffer_size;
+			sym_op->auth.data.length <<= 3;
 
 		sym_op->auth.data.offset = 0;
 	}
@@ -255,7 +285,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -276,12 +306,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* cipher parameters */
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
+
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
-			sym_op->cipher.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->cipher.data.length = options->test_buffer_size;
+			sym_op->cipher.data.length <<= 3;
 
 		sym_op->cipher.data.offset = 0;
 
@@ -318,15 +353,20 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
 					uint8_t *, offset);
 			sym_op->auth.digest.phys_addr =
-					rte_pktmbuf_mtophys_offset(buf, offset);
+					rte_pktmbuf_iova_offset(buf, offset);
 		}
 
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
+
 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
-			sym_op->auth.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->auth.data.length = options->test_buffer_size;
+			sym_op->auth.data.length <<= 3;
 
 		sym_op->auth.data.offset = 0;
 	}
@@ -360,7 +400,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 	/* AAD is placed after the IV */
@@ -384,7 +424,12 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 					dst_buf_offset);
 
 		/* AEAD parameters */
-		sym_op->aead.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->aead.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->aead.data.length = options->test_buffer_size;
 		sym_op->aead.data.offset = 0;
 
 		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
@@ -425,7 +470,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
 					uint8_t *, offset);
 			sym_op->aead.digest.phys_addr =
-					rte_pktmbuf_mtophys_offset(buf, offset);
+					rte_pktmbuf_iova_offset(buf, offset);
 		}
 	}
 
@@ -462,6 +507,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 
 static struct rte_cryptodev_sym_session *
 cperf_create_session(struct rte_mempool *sess_mp,
+	struct rte_mempool *priv_mp,
 	uint8_t dev_id,
 	const struct cperf_options *options,
 	const struct cperf_test_vector *test_vector,
@@ -472,6 +518,78 @@ cperf_create_session(struct rte_mempool *sess_mp,
 	struct rte_crypto_sym_xform aead_xform;
 	struct rte_cryptodev_sym_session *sess = NULL;
+#ifdef RTE_LIBRTE_SECURITY
+	/*
+	 * security only
+	 */
+	if (options->op_type == CPERF_PDCP) {
+		/* Setup Cipher Parameters */
+		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+		cipher_xform.next = NULL;
+		cipher_xform.cipher.algo = options->cipher_algo;
+		cipher_xform.cipher.op = options->cipher_op;
+		cipher_xform.cipher.iv.offset = iv_offset;
+
+		/* cipher different than null */
+		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
+			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
+			cipher_xform.cipher.iv.length = test_vector->cipher_iv.length;
+		} else {
+			cipher_xform.cipher.key.data = NULL;
+			cipher_xform.cipher.key.length = 0;
+			cipher_xform.cipher.iv.length = 0;
+		}
+
+		/* Setup Auth Parameters */
+		if (options->auth_algo != 0) {
+			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+			auth_xform.next = NULL;
+			auth_xform.auth.algo = options->auth_algo;
+			auth_xform.auth.op = options->auth_op;
+			auth_xform.auth.iv.offset = iv_offset +
+				cipher_xform.cipher.iv.length;
+
+			/* auth different than null */
+			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
+				auth_xform.auth.digest_length = options->digest_sz;
+				auth_xform.auth.key.length = test_vector->auth_key.length;
+				auth_xform.auth.key.data = test_vector->auth_key.data;
+				auth_xform.auth.iv.length = test_vector->auth_iv.length;
+			} else {
+				auth_xform.auth.digest_length = 0;
+				auth_xform.auth.key.length = 0;
+				auth_xform.auth.key.data = NULL;
+				auth_xform.auth.iv.length = 0;
+			}
+
+			cipher_xform.next = &auth_xform;
+		} else {
+			cipher_xform.next = NULL;
+		}
+
+		struct rte_security_session_conf sess_conf = {
+			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
+			{.pdcp = {
+				.bearer = 0x16,
+				.domain = options->pdcp_domain,
+				.pkt_dir = 0,
+				.sn_size = options->pdcp_sn_sz,
+				.hfn = 0x1,
+				.hfn_threshold = 0x70C0A,
+			} },
+			.crypto_xform = &cipher_xform
+		};
+
+		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+					rte_cryptodev_get_sec_ctx(dev_id);
+
+		/* Create security session */
+		return (void *)rte_security_session_create(ctx,
+					&sess_conf, sess_mp);
+	}
+#endif
 	sess = rte_cryptodev_sym_session_create(sess_mp);
 	/*
 	 * cipher only
 	 */
@@ -498,7 +616,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		}
 		/* create crypto session */
 		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
-				sess_mp);
+				priv_mp);
 	/*
 	 * auth only
 	 */
@@ -507,6 +625,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		auth_xform.next = NULL;
 		auth_xform.auth.algo = options->auth_algo;
 		auth_xform.auth.op = options->auth_op;
+		auth_xform.auth.iv.offset = iv_offset;
 
 		/* auth different than null */
 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
@@ -525,7 +644,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		}
 		/* create crypto session */
 		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
-				sess_mp);
+				priv_mp);
 	/*
 	 * cipher and auth
 	 */
@@ -561,6 +680,8 @@ cperf_create_session(struct rte_mempool *sess_mp,
 		auth_xform.next = NULL;
 		auth_xform.auth.algo = options->auth_algo;
 		auth_xform.auth.op = options->auth_op;
+		auth_xform.auth.iv.offset = iv_offset +
+			cipher_xform.cipher.iv.length;
 
 		/* auth different than null */
 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
@@ -582,12 +703,12 @@ cperf_create_session(struct rte_mempool *sess_mp,
 			cipher_xform.next = &auth_xform;
 			/* create crypto session */
 			rte_cryptodev_sym_session_init(dev_id,
-				sess, &cipher_xform, sess_mp);
+				sess, &cipher_xform, priv_mp);
 		} else { /* auth then cipher */
 			auth_xform.next = &cipher_xform;
 			/* create crypto session */
 			rte_cryptodev_sym_session_init(dev_id,
-				sess, &auth_xform, sess_mp);
+				sess, &auth_xform, priv_mp);
 		}
 	} else { /* options->op_type == CPERF_AEAD */
 		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
@@ -608,7 +729,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
 
 		/* Create crypto session */
 		rte_cryptodev_sym_session_init(dev_id,
-				sess, &aead_xform, sess_mp);
+				sess, &aead_xform, priv_mp);
 	}
 
 	return sess;
@@ -646,6 +767,11 @@ cperf_get_op_functions(const struct cperf_options *options,
 		op_fns->populate_ops = cperf_set_ops_cipher;
 		return 0;
 	}
-
+#ifdef RTE_LIBRTE_SECURITY
+	if (options->op_type == CPERF_PDCP) {
+		op_fns->populate_ops = cperf_set_ops_security;
+		return 0;
+	}
+#endif
 	return -1;
 }
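
Reviewer note, not part of the patch: every populate_ops helper above now repeats the same per-operation length selection, i.e. take the next entry of options->imix_buffer_sizes when an IMIX distribution is configured, otherwise fall back to the fixed options->test_buffer_size, and for the bit-oriented algorithms (SNOW3G, KASUMI, ZUC) convert bytes to bits with the <<= 3 shift. The sketch below only restates that pattern as a helper; the function name cperf_pkt_len is hypothetical, while the cperf_options fields it touches are the ones the diff already uses.

/* Illustrative helper, not part of cperf_ops.c; assumes the cperf headers. */
static uint32_t
cperf_pkt_len(const struct cperf_options *options, uint32_t *imix_idx)
{
	uint32_t len;

	if (options->imix_distribution_count) {
		/* IMIX mode: walk the pre-expanded size table, one entry per
		 * operation, wrapping at the op pool size.
		 */
		len = options->imix_buffer_sizes[*imix_idx];
		*imix_idx = (*imix_idx + 1) % options->pool_sz;
	} else {
		/* Fixed-size mode: every operation uses the same length. */
		len = options->test_buffer_size;
	}

	return len;
}

A caller driving SNOW3G UEA2/UIA2, KASUMI F8/F9 or ZUC EEA3/EIA3 would then apply len <<= 3, matching the byte-to-bit conversion in the hunks above.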
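A second note on the sess_mp/priv_mp split in cperf_create_session(): rte_cryptodev_sym_session_create() keeps allocating the generic session header from sess_mp, while rte_cryptodev_sym_session_init() now receives the new priv_mp argument for the device-specific private session data. The following is a hedged sketch of that two-mempool pattern under the same DPDK API generation; the wrapper name and the added error handling are illustrative only (the patch itself does not check these return values).

/* Illustrative only: header pool (sess_mp) plus private-data pool (priv_mp). */
static struct rte_cryptodev_sym_session *
create_sym_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp, struct rte_mempool *priv_mp)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* Allocates and fills the PMD private session data from priv_mp */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}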