F: app/test-pmd/
F: doc/guides/testpmd_app_ug/
+Crypto performance test application
+M: Declan Doherty <declan.doherty@intel.com>
+F: app/test-crypto-perf/
+
Procinfo tool
M: Maryam Tahhan <maryam.tahhan@intel.com>
M: Reshma Pattan <reshma.pattan@intel.com>
DIRS-$(CONFIG_RTE_LIBRTE_ACL) += test-acl
DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test-pipeline
DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
+DIRS-$(CONFIG_RTE_APP_CRYPTO_PERF) += test-crypto-perf
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_test
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += proc_info
DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump
--- /dev/null
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
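+# Built as part of the app/ directory when CONFIG_RTE_APP_CRYPTO_PERF is
+# enabled (see the app/Makefile hunk above).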
+APP = dpdk-test-crypto-perf
+
+CFLAGS += $(WERROR_FLAGS)
+
+# all sources are stored in SRCS-y
+SRCS-y := main.c
+SRCS-y += cperf_ops.c
+SRCS-y += cperf_options_parsing.c
+SRCS-y += cperf_test_vectors.c
+SRCS-y += cperf_test_throughput.c
+SRCS-y += cperf_test_latency.c
+SRCS-y += cperf_test_vector_parsing.c
+
+# this application needs the DPDK libraries to be built first
+DEPDIRS-y += lib
+
+include $(RTE_SDK)/mk/rte.app.mk
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_
+#define _CPERF_
+
+#include <rte_crypto.h>
+
+#include "cperf_ops.h"
+
+struct cperf_options;
+struct cperf_test_vector;
+struct cperf_op_fns;
+
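+/*
+ * Each performance test type implements this interface: the constructor
+ * allocates a per-(device, queue pair) test context from the parsed options
+ * and test vector, the runner executes the measurement loop using that
+ * context, and the destructor releases it.
+ */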
+typedef void *(*cperf_constructor_t)(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *t_vec,
+ const struct cperf_op_fns *op_fns);
+
+typedef int (*cperf_runner_t)(void *test_ctx);
+typedef void (*cperf_destructor_t)(void *test_ctx);
+
+struct cperf_test {
+ cperf_constructor_t constructor;
+ cperf_runner_t runner;
+ cperf_destructor_t destructor;
+};
+
+#endif /* _CPERF_ */
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+
+#include "cperf_ops.h"
+#include "cperf_test_vectors.h"
+
+static int
+cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector __rte_unused)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_null_auth(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector __rte_unused)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* auth parameters */
+ sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_cipher(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.iv.data = test_vector->iv.data;
+ sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+ sym_op->cipher.iv.length = test_vector->iv.length;
+
+ sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.offset = 0;
+ }
+
+ return 0;
+}
+
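+/*
+ * Auth operations: on verify the reference digest from the test vector is
+ * used; on generate the digest is written into the mbuf right after the
+ * payload (the destination mbuf when out-of-place, otherwise the segment of
+ * the source chain that holds that offset).
+ */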
+static int
+cperf_set_ops_auth(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* authentication parameters */
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sym_op->auth.digest.data = test_vector->digest.data;
+ sym_op->auth.digest.phys_addr =
+ test_vector->digest.phys_addr;
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ } else {
+
+ uint32_t offset = options->buffer_sz;
+ struct rte_mbuf *buf, *tbuf;
+
+ if (options->out_of_place) {
+ buf = bufs_out[i];
+ } else {
+ buf = bufs_in[i];
+
+ tbuf = buf;
+ while ((tbuf->next != NULL) &&
+ (offset >= tbuf->data_len)) {
+ offset -= tbuf->data_len;
+ tbuf = tbuf->next;
+ }
+ }
+
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ uint8_t *, offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(buf, offset);
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
+ sym_op->auth.aad.data = test_vector->aad.data;
+ sym_op->auth.aad.length = options->auth_aad_sz;
+
+ }
+
+ sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.iv.data = test_vector->iv.data;
+ sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+ sym_op->cipher.iv.length = test_vector->iv.length;
+
+ sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.offset = 0;
+
+ /* authentication parameters */
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sym_op->auth.digest.data = test_vector->digest.data;
+ sym_op->auth.digest.phys_addr =
+ test_vector->digest.phys_addr;
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ } else {
+
+ uint32_t offset = options->buffer_sz;
+ struct rte_mbuf *buf, *tbuf;
+
+ if (options->out_of_place) {
+ buf = bufs_out[i];
+ } else {
+ buf = bufs_in[i];
+
+ tbuf = buf;
+ while ((tbuf->next != NULL) &&
+ (offset >= tbuf->data_len)) {
+ offset -= tbuf->data_len;
+ tbuf = tbuf->next;
+ }
+ }
+
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ uint8_t *, offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(buf, offset);
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
+ sym_op->auth.aad.data = test_vector->aad.data;
+ sym_op->auth.aad.length = options->auth_aad_sz;
+ }
+
+ sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.offset = 0;
+ }
+
+ return 0;
+}
+
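+/*
+ * AEAD-style operations (e.g. AES-GCM): the AAD sits at the start of the
+ * source mbuf, so the cipher data offset is the AAD size rounded up to a
+ * 16-byte boundary, and on generate the digest is placed immediately after
+ * the ciphertext.
+ */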
+static int
+cperf_set_ops_aead(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.iv.data = test_vector->iv.data;
+ sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+ sym_op->cipher.iv.length = test_vector->iv.length;
+
+ sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.offset =
+ RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
+
+ sym_op->auth.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
+ sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
+ sym_op->auth.aad.length = options->auth_aad_sz;
+
+ /* authentication parameters */
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sym_op->auth.digest.data = test_vector->digest.data;
+ sym_op->auth.digest.phys_addr =
+ test_vector->digest.phys_addr;
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ } else {
+
+ uint32_t offset = sym_op->cipher.data.length +
+ sym_op->cipher.data.offset;
+ struct rte_mbuf *buf, *tbuf;
+
+ if (options->out_of_place) {
+ buf = bufs_out[i];
+ } else {
+ buf = bufs_in[i];
+
+ tbuf = buf;
+ while ((tbuf->next != NULL) &&
+ (offset >= tbuf->data_len)) {
+ offset -= tbuf->data_len;
+ tbuf = tbuf->next;
+ }
+ }
+
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ uint8_t *, offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(buf, offset);
+
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ }
+
+ sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.offset = options->auth_aad_sz;
+ }
+
+ return 0;
+}
+
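+/*
+ * Build the symmetric session for the selected operation chain. For AES-GCM
+ * the cipher and auth transforms are chained according to the cipher
+ * direction (cipher first on encrypt, auth first on decrypt); otherwise the
+ * chaining order follows the requested op type.
+ */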
+static struct rte_cryptodev_sym_session *
+cperf_create_session(uint8_t dev_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
+ struct rte_cryptodev_sym_session *sess = NULL;
+
+ /*
+ * cipher only
+ */
+ if (options->op_type == CPERF_CIPHER_ONLY) {
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+ cipher_xform.cipher.algo = options->cipher_algo;
+ cipher_xform.cipher.op = options->cipher_op;
+
+ /* cipher algorithm other than NULL */
+ if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ cipher_xform.cipher.key.data =
+ test_vector->cipher_key.data;
+ cipher_xform.cipher.key.length =
+ test_vector->cipher_key.length;
+ }
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ /*
+ * auth only
+ */
+ } else if (options->op_type == CPERF_AUTH_ONLY) {
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = NULL;
+ auth_xform.auth.algo = options->auth_algo;
+ auth_xform.auth.op = options->auth_op;
+
+ /* auth algorithm other than NULL */
+ if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ auth_xform.auth.digest_length =
+ options->auth_digest_sz;
+ auth_xform.auth.add_auth_data_length =
+ options->auth_aad_sz;
+ auth_xform.auth.key.length =
+ test_vector->auth_key.length;
+ auth_xform.auth.key.data = test_vector->auth_key.data;
+ }
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ /*
+ * cipher and auth
+ */
+ } else if (options->op_type == CPERF_CIPHER_THEN_AUTH
+ || options->op_type == CPERF_AUTH_THEN_CIPHER
+ || options->op_type == CPERF_AEAD) {
+
+ /*
+ * cipher
+ */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+ cipher_xform.cipher.algo = options->cipher_algo;
+ cipher_xform.cipher.op = options->cipher_op;
+
+ /* cipher algorithm other than NULL */
+ if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ cipher_xform.cipher.key.data =
+ test_vector->cipher_key.data;
+ cipher_xform.cipher.key.length =
+ test_vector->cipher_key.length;
+ }
+
+ /*
+ * auth
+ */
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = NULL;
+ auth_xform.auth.algo = options->auth_algo;
+ auth_xform.auth.op = options->auth_op;
+
+ /* auth algorithm other than NULL */
+ if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ auth_xform.auth.digest_length = options->auth_digest_sz;
+ auth_xform.auth.add_auth_data_length =
+ options->auth_aad_sz;
+ /* auth options for aes gcm */
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
+ auth_xform.auth.key.length = 0;
+ auth_xform.auth.key.data = NULL;
+ } else { /* auth options for others */
+ auth_xform.auth.key.length =
+ test_vector->auth_key.length;
+ auth_xform.auth.key.data =
+ test_vector->auth_key.data;
+ }
+ }
+
+ /* create crypto session for aes gcm */
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
+ if (options->cipher_op ==
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ cipher_xform.next = &auth_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &cipher_xform);
+ } else { /* decrypt */
+ auth_xform.next = &cipher_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &auth_xform);
+ }
+ } else { /* create crypto session for other */
+ /* cipher then auth */
+ if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ cipher_xform.next = &auth_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &cipher_xform);
+ } else { /* auth then cipher */
+ auth_xform.next = &cipher_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &auth_xform);
+ }
+ }
+ }
+ return sess;
+}
+
+int
+cperf_get_op_functions(const struct cperf_options *options,
+ struct cperf_op_fns *op_fns)
+{
+ memset(op_fns, 0, sizeof(struct cperf_op_fns));
+
+ op_fns->sess_create = cperf_create_session;
+
+ if (options->op_type == CPERF_AEAD
+ || options->op_type == CPERF_AUTH_THEN_CIPHER
+ || options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
+ op_fns->populate_ops = cperf_set_ops_aead;
+ else
+ op_fns->populate_ops = cperf_set_ops_cipher_auth;
+ return 0;
+ }
+ if (options->op_type == CPERF_AUTH_ONLY) {
+ if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
+ op_fns->populate_ops = cperf_set_ops_null_auth;
+ else
+ op_fns->populate_ops = cperf_set_ops_auth;
+ return 0;
+ }
+ if (options->op_type == CPERF_CIPHER_ONLY) {
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
+ op_fns->populate_ops = cperf_set_ops_null_cipher;
+ else
+ op_fns->populate_ops = cperf_set_ops_cipher;
+ return 0;
+ }
+
+ return -1;
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_OPS_
+#define _CPERF_OPS_
+
+#include <rte_crypto.h>
+
+#include "cperf.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+
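+/* Per-test callbacks: sess_create builds the cryptodev session for the
+ * configured transform chain; populate_ops fills a burst of crypto ops and
+ * attaches the source/destination mbufs before they are enqueued.
+ */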
+typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
+ uint8_t dev_id, const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector);
+
+typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector);
+
+
+typedef int (*cperf_verify_crypto_op_t)(struct rte_mbuf *m,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector);
+
+struct cperf_op_fns {
+ cperf_sessions_create_t sess_create;
+ cperf_populate_ops_t populate_ops;
+};
+
+int
+cperf_get_op_functions(const struct cperf_options *options,
+ struct cperf_op_fns *op_fns);
+
+#endif /* _CPERF_OPS_ */
--- /dev/null
+
+#ifndef _CPERF_OPTIONS_
+#define _CPERF_OPTIONS_
+
+#include <rte_crypto.h>
+
+#define CPERF_PTEST_TYPE ("ptest")
+#define CPERF_SILENT ("silent")
+
+#define CPERF_POOL_SIZE ("pool-sz")
+#define CPERF_TOTAL_OPS ("total-ops")
+#define CPERF_BURST_SIZE ("burst-sz")
+#define CPERF_BUFFER_SIZE ("buffer-sz")
+#define CPERF_SEGMENTS_NB ("segments-nb")
+
+#define CPERF_DEVTYPE ("devtype")
+#define CPERF_OPTYPE ("optype")
+#define CPERF_SESSIONLESS ("sessionless")
+#define CPERF_OUT_OF_PLACE ("out-of-place")
+#define CPERF_VERIFY ("verify")
+#define CPERF_TEST_FILE ("test-file")
+#define CPERF_TEST_NAME ("test-name")
+
+#define CPERF_CIPHER_ALGO ("cipher-algo")
+#define CPERF_CIPHER_OP ("cipher-op")
+#define CPERF_CIPHER_KEY_SZ ("cipher-key-sz")
+#define CPERF_CIPHER_IV_SZ ("cipher-iv-sz")
+
+#define CPERF_AUTH_ALGO ("auth-algo")
+#define CPERF_AUTH_OP ("auth-op")
+#define CPERF_AUTH_KEY_SZ ("auth-key-sz")
+#define CPERF_AUTH_DIGEST_SZ ("auth-digest-sz")
+#define CPERF_AUTH_AAD_SZ ("auth-aad-sz")
+#define CPERF_CSV ("csv-friendly")
+
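+/*
+ * The strings above are the long option names understood by the parser in
+ * cperf_options_parsing.c. An illustrative invocation (application options
+ * follow the EAL arguments and a "--" separator, as in other DPDK apps):
+ *   dpdk-test-crypto-perf <EAL args> -- --ptest <type> --devtype <PMD name>
+ *       --optype <op chain> --buffer-sz 1024 --total-ops 1000000
+ */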
+
+enum cperf_perf_test_type {
+ CPERF_TEST_TYPE_THROUGHPUT,
+ CPERF_TEST_TYPE_CYCLECOUNT,
+ CPERF_TEST_TYPE_LATENCY
+};
+
+
+extern const char *cperf_test_type_strs[];
+
+enum cperf_op_type {
+ CPERF_CIPHER_ONLY = 1,
+ CPERF_AUTH_ONLY,
+ CPERF_CIPHER_THEN_AUTH,
+ CPERF_AUTH_THEN_CIPHER,
+ CPERF_AEAD
+};
+
+extern const char *cperf_op_type_strs[];
+
+struct cperf_options {
+ enum cperf_perf_test_type test;
+
+ uint32_t pool_sz;
+ uint32_t total_ops;
+ uint32_t burst_sz;
+ uint32_t buffer_sz;
+ uint32_t segments_nb;
+
+ char device_type[RTE_CRYPTODEV_NAME_LEN];
+ enum cperf_op_type op_type;
+
+ uint32_t sessionless:1;
+ uint32_t out_of_place:1;
+ uint32_t verify:1;
+ uint32_t silent:1;
+ uint32_t csv:1;
+
+ char *test_file;
+ char *test_name;
+
+ enum rte_crypto_cipher_algorithm cipher_algo;
+ enum rte_crypto_cipher_operation cipher_op;
+
+ uint16_t cipher_key_sz;
+ uint16_t cipher_iv_sz;
+
+ enum rte_crypto_auth_algorithm auth_algo;
+ enum rte_crypto_auth_operation auth_op;
+
+ uint16_t auth_key_sz;
+ uint16_t auth_digest_sz;
+ uint16_t auth_aad_sz;
+};
+
+void
+cperf_options_default(struct cperf_options *options);
+
+int
+cperf_options_parse(struct cperf_options *options,
+ int argc, char **argv);
+
+int
+cperf_options_check(struct cperf_options *options);
+
+void
+cperf_options_dump(struct cperf_options *options);
+
+#endif /* _CPERF_OPTIONS_ */
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+
+#include "cperf_options.h"
+
+struct name_id_map {
+ const char *name;
+ uint32_t id;
+};
+
+static int
+get_str_key_id_mapping(struct name_id_map *map, unsigned int map_len,
+ const char *str_key)
+{
+ unsigned int i;
+
+ for (i = 0; i < map_len; i++) {
+
+ if (strcmp(str_key, map[i].name) == 0)
+ return map[i].id;
+ }
+
+ return -1;
+}
+
+static int
+parse_cperf_test_type(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map cperftest_namemap[] = {
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_THROUGHPUT],
+ CPERF_TEST_TYPE_THROUGHPUT
+ },
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_CYCLECOUNT],
+ CPERF_TEST_TYPE_CYCLECOUNT
+ },
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_LATENCY],
+ CPERF_TEST_TYPE_LATENCY
+ }
+ };
+
+ int id = get_str_key_id_mapping(
+ (struct name_id_map *)cperftest_namemap,
+ RTE_DIM(cperftest_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "failed to parse test type\n");
+ return -1;
+ }
+
+ opts->test = (enum cperf_perf_test_type)id;
+
+ return 0;
+}
+
+static int
+parse_uint32_t(uint32_t *value, const char *arg)
+{
+ char *end = NULL;
+ unsigned long n = strtoul(arg, &end, 10);
+
+ if ((arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (n > UINT32_MAX)
+ return -ERANGE;
+
+ *value = (uint32_t) n;
+
+ return 0;
+}
+
+static int
+parse_uint16_t(uint16_t *value, const char *arg)
+{
+ uint32_t val = 0;
+ int ret = parse_uint32_t(&val, arg);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = (uint16_t) val;
+
+ return 0;
+}
+
+static int
+parse_total_ops(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->total_ops, arg);
+
+ if (ret)
+ RTE_LOG(ERR, USER1, "failed to parse total operations count\n");
+
+ return ret;
+}
+
+static int
+parse_pool_sz(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->pool_sz, arg);
+
+ if (ret)
+ RTE_LOG(ERR, USER1, "failed to parse pool size\n");
+ return ret;
+}
+
+static int
+parse_burst_sz(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->burst_sz, arg);
+
+ if (ret)
+ RTE_LOG(ERR, USER1, "failed to parse burst size\n");
+ return ret;
+}
+
+static int
+parse_buffer_sz(struct cperf_options *opts, const char *arg)
+{
+ uint32_t i, valid_buf_sz[] = {
+ 32, 64, 128, 256, 384, 512, 768, 1024, 1280, 1536, 1792,
+ 2048
+ };
+
+ if (parse_uint32_t(&opts->buffer_sz, arg)) {
+ RTE_LOG(ERR, USER1, "failed to parse buffer size\n");
+ return -1;
+ }
+
+ for (i = 0; i < RTE_DIM(valid_buf_sz); i++)
+ if (valid_buf_sz[i] == opts->buffer_sz)
+ return 0;
+
+ RTE_LOG(ERR, USER1, "invalid buffer size specified\n");
+ return -1;
+}
+
+static int
+parse_segments_nb(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->segments_nb, arg);
+
+ if (ret) {
+ RTE_LOG(ERR, USER1, "failed to parse segments number\n");
+ return -1;
+ }
+
+ if ((opts->segments_nb == 0) || (opts->segments_nb > 255)) {
+ RTE_LOG(ERR, USER1, "invalid segments number specified\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+parse_device_type(struct cperf_options *opts, const char *arg)
+{
+ if (strlen(arg) > (sizeof(opts->device_type) - 1))
+ return -1;
+
+ strncpy(opts->device_type, arg, sizeof(opts->device_type));
+
+ return 0;
+}
+
+static int
+parse_op_type(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map optype_namemap[] = {
+ {
+ cperf_op_type_strs[CPERF_CIPHER_ONLY],
+ CPERF_CIPHER_ONLY
+ },
+ {
+ cperf_op_type_strs[CPERF_AUTH_ONLY],
+ CPERF_AUTH_ONLY
+ },
+ {
+ cperf_op_type_strs[CPERF_CIPHER_THEN_AUTH],
+ CPERF_CIPHER_THEN_AUTH
+ },
+ {
+ cperf_op_type_strs[CPERF_AUTH_THEN_CIPHER],
+ CPERF_AUTH_THEN_CIPHER
+ },
+ {
+ cperf_op_type_strs[CPERF_AEAD],
+ CPERF_AEAD
+ }
+ };
+
+ int id = get_str_key_id_mapping(optype_namemap,
+ RTE_DIM(optype_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid op type specified\n");
+ return -1;
+ }
+
+ opts->op_type = (enum cperf_op_type)id;
+
+ return 0;
+}
+
+static int
+parse_sessionless(struct cperf_options *opts,
+ const char *arg __rte_unused)
+{
+ opts->sessionless = 1;
+ return 0;
+}
+
+static int
+parse_out_of_place(struct cperf_options *opts,
+ const char *arg __rte_unused)
+{
+ opts->out_of_place = 1;
+ return 0;
+}
+
+static int
+parse_verify(struct cperf_options *opts,
+ const char *arg __rte_unused)
+{
+ opts->verify = 1;
+
+ return 0;
+}
+
+static int
+parse_test_file(struct cperf_options *opts,
+ const char *arg)
+{
+ opts->test_file = strdup(arg);
+ if (access(opts->test_file, F_OK) != -1)
+ return 0;
+ RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n");
+
+ return -1;
+}
+
+static int
+parse_test_name(struct cperf_options *opts,
+ const char *arg)
+{
+ char *test_name = (char *) rte_zmalloc(NULL,
+ sizeof(char) * (strlen(arg) + 3), 0);
+ if (test_name == NULL) {
+ RTE_LOG(ERR, USER1, "failed to allocate memory for test name\n");
+ return -1;
+ }
+ snprintf(test_name, strlen(arg) + 3, "[%s]", arg);
+ opts->test_name = test_name;
+
+ return 0;
+}
+
+static int
+parse_silent(struct cperf_options *opts,
+ const char *arg __rte_unused)
+{
+ opts->silent = 1;
+
+ return 0;
+}
+
+static int
+parse_cipher_algo(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map cipher_algo_namemap[] = {
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_3DES_CBC],
+ RTE_CRYPTO_CIPHER_3DES_CBC
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_3DES_ECB],
+ RTE_CRYPTO_CIPHER_3DES_ECB
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_3DES_CTR],
+ RTE_CRYPTO_CIPHER_3DES_CTR
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_AES_CBC],
+ RTE_CRYPTO_CIPHER_AES_CBC
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_AES_CCM],
+ RTE_CRYPTO_CIPHER_AES_CCM
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_AES_CTR],
+ RTE_CRYPTO_CIPHER_AES_CTR
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_AES_ECB],
+ RTE_CRYPTO_CIPHER_AES_ECB
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_AES_GCM],
+ RTE_CRYPTO_CIPHER_AES_GCM
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_AES_F8],
+ RTE_CRYPTO_CIPHER_AES_F8
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_AES_XTS],
+ RTE_CRYPTO_CIPHER_AES_XTS
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_ARC4],
+ RTE_CRYPTO_CIPHER_ARC4
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_NULL],
+ RTE_CRYPTO_CIPHER_NULL
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_KASUMI_F8],
+ RTE_CRYPTO_CIPHER_KASUMI_F8
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_SNOW3G_UEA2],
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2
+ },
+ {
+ rte_crypto_cipher_algorithm_strings
+ [RTE_CRYPTO_CIPHER_ZUC_EEA3],
+ RTE_CRYPTO_CIPHER_ZUC_EEA3
+ },
+ };
+
+
+ int id = get_str_key_id_mapping(cipher_algo_namemap,
+ RTE_DIM(cipher_algo_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "Invalid cipher algorithm specified\n");
+ return -1;
+ }
+
+ opts->cipher_algo = (enum rte_crypto_cipher_algorithm)id;
+
+ return 0;
+}
+
+static int
+parse_cipher_op(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map cipher_op_namemap[] = {
+ {
+ rte_crypto_cipher_operation_strings
+ [RTE_CRYPTO_CIPHER_OP_ENCRYPT],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT },
+ {
+ rte_crypto_cipher_operation_strings
+ [RTE_CRYPTO_CIPHER_OP_DECRYPT],
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ }
+ };
+
+ int id = get_str_key_id_mapping(cipher_op_namemap,
+ RTE_DIM(cipher_op_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "Invalid cipher operation specified\n");
+ return -1;
+ }
+
+ opts->cipher_op = (enum rte_crypto_cipher_operation)id;
+
+ return 0;
+}
+
+static int
+parse_cipher_key_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->cipher_key_sz, arg);
+}
+
+static int
+parse_cipher_iv_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->cipher_iv_sz, arg);
+}
+
+static int
+parse_auth_algo(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map cipher_auth_namemap[] = {
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_AES_CBC_MAC],
+ RTE_CRYPTO_AUTH_AES_CBC_MAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_AES_CCM],
+ RTE_CRYPTO_AUTH_AES_CCM
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_AES_CMAC],
+ RTE_CRYPTO_AUTH_AES_CMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_AES_GCM],
+ RTE_CRYPTO_AUTH_AES_GCM
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_AES_GMAC],
+ RTE_CRYPTO_AUTH_AES_GMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_AES_XCBC_MAC],
+ RTE_CRYPTO_AUTH_AES_XCBC_MAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_MD5],
+ RTE_CRYPTO_AUTH_MD5
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_MD5_HMAC],
+ RTE_CRYPTO_AUTH_MD5_HMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA1],
+ RTE_CRYPTO_AUTH_SHA1
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA1_HMAC],
+ RTE_CRYPTO_AUTH_SHA1_HMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA224],
+ RTE_CRYPTO_AUTH_SHA224
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA224_HMAC],
+ RTE_CRYPTO_AUTH_SHA224_HMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA256],
+ RTE_CRYPTO_AUTH_SHA256
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA256_HMAC],
+ RTE_CRYPTO_AUTH_SHA256_HMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA384],
+ RTE_CRYPTO_AUTH_SHA384
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA384_HMAC],
+ RTE_CRYPTO_AUTH_SHA384_HMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA512],
+ RTE_CRYPTO_AUTH_SHA512
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SHA512_HMAC],
+ RTE_CRYPTO_AUTH_SHA512_HMAC
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_KASUMI_F9],
+ RTE_CRYPTO_AUTH_KASUMI_F9
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_SNOW3G_UIA2],
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2
+ },
+ {
+ rte_crypto_auth_algorithm_strings
+ [RTE_CRYPTO_AUTH_ZUC_EIA3],
+ RTE_CRYPTO_AUTH_ZUC_EIA3
+ },
+ };
+
+
+ int id = get_str_key_id_mapping(cipher_auth_namemap,
+ RTE_DIM(cipher_auth_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid authentication algorithm specified"
+ "\n");
+ return -1;
+ }
+
+ opts->auth_algo = (enum rte_crypto_auth_algorithm)id;
+
+ return 0;
+}
+
+static int
+parse_auth_op(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map auth_op_namemap[] = {
+ {
+ rte_crypto_auth_operation_strings
+ [RTE_CRYPTO_AUTH_OP_GENERATE],
+ RTE_CRYPTO_AUTH_OP_GENERATE },
+ {
+ rte_crypto_auth_operation_strings
+ [RTE_CRYPTO_AUTH_OP_VERIFY],
+ RTE_CRYPTO_AUTH_OP_VERIFY
+ }
+ };
+
+ int id = get_str_key_id_mapping(auth_op_namemap,
+ RTE_DIM(auth_op_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid authentication operation specified"
+ "\n");
+ return -1;
+ }
+
+ opts->auth_op = (enum rte_crypto_auth_operation)id;
+
+ return 0;
+}
+
+static int
+parse_auth_key_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_key_sz, arg);
+}
+
+static int
+parse_auth_digest_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_digest_sz, arg);
+}
+
+static int
+parse_auth_aad_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_aad_sz, arg);
+}
+
+static int
+parse_csv_friendly(struct cperf_options *opts, const char *arg __rte_unused)
+{
+ opts->csv = 1;
+ opts->silent = 1;
+ return 0;
+}
+
+typedef int (*option_parser_t)(struct cperf_options *opts,
+ const char *arg);
+
+struct long_opt_parser {
+ const char *lgopt_name;
+ option_parser_t parser_fn;
+
+};
+
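+/*
+ * Long options table handed to getopt_long(); every entry here needs a
+ * matching handler in the parsermap table of cperf_opts_parse_long() below,
+ * otherwise parsing fails with -EINVAL.
+ */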
+static struct option lgopts[] = {
+
+ { CPERF_PTEST_TYPE, required_argument, 0, 0 },
+
+ { CPERF_POOL_SIZE, required_argument, 0, 0 },
+ { CPERF_TOTAL_OPS, required_argument, 0, 0 },
+ { CPERF_BURST_SIZE, required_argument, 0, 0 },
+ { CPERF_BUFFER_SIZE, required_argument, 0, 0 },
+ { CPERF_SEGMENTS_NB, required_argument, 0, 0 },
+
+ { CPERF_DEVTYPE, required_argument, 0, 0 },
+ { CPERF_OPTYPE, required_argument, 0, 0 },
+
+ { CPERF_SILENT, no_argument, 0, 0 },
+ { CPERF_SESSIONLESS, no_argument, 0, 0 },
+ { CPERF_OUT_OF_PLACE, no_argument, 0, 0 },
+ { CPERF_VERIFY, no_argument, 0, 0 },
+ { CPERF_TEST_FILE, required_argument, 0, 0 },
+ { CPERF_TEST_NAME, required_argument, 0, 0 },
+
+ { CPERF_CIPHER_ALGO, required_argument, 0, 0 },
+ { CPERF_CIPHER_OP, required_argument, 0, 0 },
+
+ { CPERF_CIPHER_KEY_SZ, required_argument, 0, 0 },
+ { CPERF_CIPHER_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_AUTH_ALGO, required_argument, 0, 0 },
+ { CPERF_AUTH_OP, required_argument, 0, 0 },
+
+ { CPERF_AUTH_KEY_SZ, required_argument, 0, 0 },
+ { CPERF_AUTH_DIGEST_SZ, required_argument, 0, 0 },
+ { CPERF_AUTH_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_CSV, no_argument, 0, 0},
+
+ { NULL, 0, 0, 0 }
+};
+
+void
+cperf_options_default(struct cperf_options *opts)
+{
+ opts->test = CPERF_TEST_TYPE_THROUGHPUT;
+
+ opts->pool_sz = 8192;
+ opts->total_ops = 10000000;
+ opts->burst_sz = 32;
+ opts->buffer_sz = 64;
+ opts->segments_nb = 1;
+
+ strncpy(opts->device_type, "crypto_aesni_mb",
+ sizeof(opts->device_type));
+
+ opts->op_type = CPERF_CIPHER_THEN_AUTH;
+
+ opts->silent = 0;
+ opts->verify = 0;
+ opts->test_file = NULL;
+ opts->test_name = NULL;
+ opts->sessionless = 0;
+ opts->out_of_place = 0;
+ opts->csv = 0;
+
+ opts->cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ opts->cipher_op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ opts->cipher_key_sz = 16;
+ opts->cipher_iv_sz = 16;
+
+ opts->auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ opts->auth_op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ opts->auth_key_sz = 64;
+ opts->auth_digest_sz = 12;
+ opts->auth_aad_sz = 0;
+}
+
+static int
+cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
+{
+ struct long_opt_parser parsermap[] = {
+ { CPERF_PTEST_TYPE, parse_cperf_test_type },
+ { CPERF_SILENT, parse_silent },
+ { CPERF_POOL_SIZE, parse_pool_sz },
+ { CPERF_TOTAL_OPS, parse_total_ops },
+ { CPERF_BURST_SIZE, parse_burst_sz },
+ { CPERF_BUFFER_SIZE, parse_buffer_sz },
+ { CPERF_SEGMENTS_NB, parse_segments_nb },
+ { CPERF_DEVTYPE, parse_device_type },
+ { CPERF_OPTYPE, parse_op_type },
+ { CPERF_SESSIONLESS, parse_sessionless },
+ { CPERF_OUT_OF_PLACE, parse_out_of_place },
+ { CPERF_VERIFY, parse_verify },
+ { CPERF_TEST_FILE, parse_test_file },
+ { CPERF_TEST_NAME, parse_test_name },
+ { CPERF_CIPHER_ALGO, parse_cipher_algo },
+ { CPERF_CIPHER_OP, parse_cipher_op },
+ { CPERF_CIPHER_KEY_SZ, parse_cipher_key_sz },
+ { CPERF_CIPHER_IV_SZ, parse_cipher_iv_sz },
+ { CPERF_AUTH_ALGO, parse_auth_algo },
+ { CPERF_AUTH_OP, parse_auth_op },
+ { CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
+ { CPERF_AUTH_DIGEST_SZ, parse_auth_digest_sz },
+ { CPERF_AUTH_AAD_SZ, parse_auth_aad_sz },
+ { CPERF_CSV, parse_csv_friendly},
+ };
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(parsermap); i++) {
+ if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
+ strlen(lgopts[opt_idx].name)) == 0)
+ return parsermap[i].parser_fn(opts, optarg);
+ }
+
+ return -EINVAL;
+}
+
+int
+cperf_options_parse(struct cperf_options *options, int argc, char **argv)
+{
+ int opt, retval, opt_idx;
+
+ while ((opt = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
+ switch (opt) {
+ /* long options */
+ case 0:
+
+ retval = cperf_opts_parse_long(opt_idx, options);
+ if (retval != 0)
+ return retval;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int
+cperf_options_check(struct cperf_options *options)
+{
+ if (options->segments_nb > options->buffer_sz) {
+ RTE_LOG(ERR, USER1,
+ "Segments number greater than buffer size.\n");
+ return -EINVAL;
+ }
+
+ if (options->verify && options->test_file == NULL) {
+ RTE_LOG(ERR, USER1, "Path to the test vector file must be"
+ " specified (--test-file).\n");
+ return -EINVAL;
+ }
+
+ if (options->test_name != NULL && options->test_file == NULL) {
+ RTE_LOG(ERR, USER1, "Path to the test vector file must be"
+ " specified (--test-file).\n");
+ return -EINVAL;
+ }
+
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+ options->test_file == NULL) {
+ RTE_LOG(ERR, USER1, "Path to the test vector file must be"
+ " specified (--test-file).\n");
+ return -EINVAL;
+ }
+
+ if (options->verify &&
+ options->total_ops > options->pool_sz) {
+ RTE_LOG(ERR, USER1, "Total number of ops must be less than or"
+ " equal to the pool size.\n");
+ return -EINVAL;
+ }
+
+ if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ options->auth_op !=
+ RTE_CRYPTO_AUTH_OP_GENERATE) {
+ RTE_LOG(ERR, USER1, "Cipher-then-auth requires the encrypt"
+ " and generate operations.\n");
+ return -EINVAL;
+ }
+ } else if (options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ options->auth_op !=
+ RTE_CRYPTO_AUTH_OP_VERIFY) {
+ RTE_LOG(ERR, USER1, "Auth-then-cipher requires the decrypt"
+ " and verify operations.\n");
+ return -EINVAL;
+ }
+ } else if (options->op_type == CPERF_AEAD) {
+ if (!(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ options->auth_op ==
+ RTE_CRYPTO_AUTH_OP_GENERATE) &&
+ !(options->cipher_op ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ options->auth_op ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)) {
+ RTE_LOG(ERR, USER1, "AEAD requires either encrypt with"
+ " generate or decrypt with verify.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void
+cperf_options_dump(struct cperf_options *opts)
+{
+ printf("# Crypto Performance Application Options:\n");
+ printf("#\n");
+ printf("# cperf test: %s\n", cperf_test_type_strs[opts->test]);
+ printf("#\n");
+ printf("# size of crypto op / mbuf pool: %u\n", opts->pool_sz);
+ printf("# total number of ops: %u\n", opts->total_ops);
+ printf("# burst size: %u\n", opts->burst_sz);
+ printf("# buffer size: %u\n", opts->buffer_sz);
+ printf("# segments per buffer: %u\n", opts->segments_nb);
+ printf("#\n");
+ printf("# cryptodev type: %s\n", opts->device_type);
+ printf("#\n");
+ printf("# crypto operation: %s\n", cperf_op_type_strs[opts->op_type]);
+ printf("# verify operation: %s\n", opts->verify ? "yes" : "no");
+ printf("# sessionless: %s\n", opts->sessionless ? "yes" : "no");
+ printf("# out of place: %s\n", opts->out_of_place ? "yes" : "no");
+
+ printf("#\n");
+
+ if (opts->op_type == CPERF_AUTH_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER ||
+ opts->op_type == CPERF_AEAD) {
+ printf("# auth algorithm: %s\n",
+ rte_crypto_auth_algorithm_strings[opts->auth_algo]);
+ printf("# auth operation: %s\n",
+ rte_crypto_auth_operation_strings[opts->auth_op]);
+ printf("# auth key size: %u\n", opts->auth_key_sz);
+ printf("# auth digest size: %u\n", opts->auth_digest_sz);
+ printf("# auth aad size: %u\n", opts->auth_aad_sz);
+ printf("#\n");
+ }
+
+ if (opts->op_type == CPERF_CIPHER_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER ||
+ opts->op_type == CPERF_AEAD) {
+ printf("# cipher algorithm: %s\n",
+ rte_crypto_cipher_algorithm_strings[opts->cipher_algo]);
+ printf("# cipher operation: %s\n",
+ rte_crypto_cipher_operation_strings[opts->cipher_op]);
+ printf("# cipher key size: %u\n", opts->cipher_key_sz);
+ printf("# cipher iv size: %u\n", opts->cipher_iv_sz);
+ printf("#\n");
+ }
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#include "cperf_test_latency.h"
+#include "cperf_ops.h"
+
+
+struct cperf_latency_results {
+
+ uint64_t ops_failed;
+
+ uint64_t enqd_tot;
+ uint64_t enqd_max;
+ uint64_t enqd_min;
+
+ uint64_t deqd_tot;
+ uint64_t deqd_max;
+ uint64_t deqd_min;
+
+ uint64_t cycles_tot;
+ uint64_t cycles_max;
+ uint64_t cycles_min;
+
+ uint64_t burst_num;
+ uint64_t num;
+};
+
+struct cperf_op_result {
+ uint64_t tsc_start;
+ uint64_t tsc_end;
+ enum rte_crypto_op_status status;
+};
+
+struct cperf_latency_ctx {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint8_t lcore_id;
+
+ struct rte_mempool *pkt_mbuf_pool_in;
+ struct rte_mempool *pkt_mbuf_pool_out;
+ struct rte_mbuf **mbufs_in;
+ struct rte_mbuf **mbufs_out;
+
+ struct rte_mempool *crypto_op_pool;
+
+ struct rte_cryptodev_sym_session *sess;
+
+ cperf_populate_ops_t populate_ops;
+ cperf_verify_crypto_op_t verify_op_output;
+
+ const struct cperf_options *options;
+ const struct cperf_test_vector *test_vector;
+ struct cperf_op_result *res;
+ struct cperf_latency_results results;
+};
+
+#define max(a, b) ((a) > (b) ? (uint64_t)(a) : (uint64_t)(b))
+#define min(a, b) ((a) < (b) ? (uint64_t)(a) : (uint64_t)(b))
+
+static void
+cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
+{
+ uint32_t i;
+
+ if (ctx) {
+ if (ctx->sess)
+ rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+
+ if (ctx->mbufs_in) {
+ for (i = 0; i < mbuf_nb; i++)
+ rte_pktmbuf_free(ctx->mbufs_in[i]);
+
+ rte_free(ctx->mbufs_in);
+ }
+
+ if (ctx->mbufs_out) {
+ for (i = 0; i < mbuf_nb; i++) {
+ if (ctx->mbufs_out[i] != NULL)
+ rte_pktmbuf_free(ctx->mbufs_out[i]);
+ }
+
+ rte_free(ctx->mbufs_out);
+ }
+
+ if (ctx->pkt_mbuf_pool_in)
+ rte_mempool_free(ctx->pkt_mbuf_pool_in);
+
+ if (ctx->pkt_mbuf_pool_out)
+ rte_mempool_free(ctx->pkt_mbuf_pool_out);
+
+ if (ctx->crypto_op_pool)
+ rte_mempool_free(ctx->crypto_op_pool);
+
+ rte_free(ctx->res);
+ rte_free(ctx);
+ }
+}
+
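+/*
+ * Build a (possibly segmented) source mbuf: the payload is split evenly
+ * across segments_nb segments with any remainder appended at the end, room
+ * for the digest is reserved at the tail and, for AEAD, the AAD is prepended
+ * (rounded up to 16 bytes) at the head.
+ */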
+static struct rte_mbuf *
+cperf_mbuf_create(struct rte_mempool *mempool,
+ uint32_t segments_nb,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ struct rte_mbuf *mbuf;
+ uint32_t segment_sz = options->buffer_sz / segments_nb;
+ uint32_t last_sz = options->buffer_sz % segments_nb;
+ uint8_t *mbuf_data;
+ uint8_t *test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+
+ mbuf = rte_pktmbuf_alloc(mempool);
+ if (mbuf == NULL)
+ goto error;
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+
+ while (segments_nb) {
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mempool);
+ if (m == NULL)
+ goto error;
+
+ rte_pktmbuf_chain(mbuf, m);
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+ }
+
+ if (last_sz) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, last_sz);
+ }
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
+ options->auth_digest_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ if (options->op_type == CPERF_AEAD) {
+ uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
+ RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+
+ if (aead == NULL)
+ goto error;
+
+ memcpy(aead, test_vector->aad.data, test_vector->aad.length);
+ }
+
+ return mbuf;
+error:
+ if (mbuf != NULL)
+ rte_pktmbuf_free(mbuf);
+
+ return NULL;
+}
+
+void *
+cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *op_fns)
+{
+ struct cperf_latency_ctx *ctx = NULL;
+ unsigned int mbuf_idx = 0;
+ char pool_name[32] = "";
+
+ ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
+ if (ctx == NULL)
+ goto err;
+
+ ctx->dev_id = dev_id;
+ ctx->qp_id = qp_id;
+
+ ctx->populate_ops = op_fns->populate_ops;
+ ctx->options = options;
+ ctx->test_vector = test_vector;
+
+ ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ if (ctx->sess == NULL)
+ goto err;
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
+ options->pool_sz * options->segments_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ (options->buffer_sz / options->segments_nb) +
+ (options->buffer_sz % options->segments_nb) +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_in == NULL)
+ goto err;
+
+ /* Generate mbufs_in with plaintext populated for test */
+ if (ctx->options->pool_sz % ctx->options->burst_sz)
+ goto err;
+
+ ctx->mbufs_in = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) *
+ ctx->options->pool_sz), 0);
+ if (ctx->mbufs_in == NULL)
+ goto err;
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_in, options->segments_nb,
+ options, test_vector);
+ if (ctx->mbufs_in[mbuf_idx] == NULL)
+ goto err;
+ }
+
+ if (options->out_of_place == 1) {
+
+ snprintf(pool_name, sizeof(pool_name),
+ "cperf_pool_out_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
+ pool_name, options->pool_sz, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ options->buffer_sz +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_out == NULL)
+ goto err;
+ }
+
+ ctx->mbufs_out = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) *
+ ctx->options->pool_sz), 0);
+ if (ctx->mbufs_out == NULL)
+ goto err;
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ if (options->out_of_place == 1) {
+ ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_out, 1,
+ options, test_vector);
+ if (ctx->mbufs_out[mbuf_idx] == NULL)
+ goto err;
+ } else {
+ ctx->mbufs_out[mbuf_idx] = NULL;
+ }
+ }
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
+ dev_id);
+
+ ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
+ rte_socket_id());
+ if (ctx->crypto_op_pool == NULL)
+ goto err;
+
+ ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
+ ctx->options->total_ops, 0);
+
+ if (ctx->res == NULL)
+ goto err;
+
+ return ctx;
+err:
+ cperf_latency_test_free(ctx, mbuf_idx);
+
+ return NULL;
+}
+
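+/*
+ * Flatten the (possibly segmented) result mbuf into one contiguous buffer and
+ * compare the cipher and/or auth regions against the reference test vector.
+ * Returns 0 on match, 1 otherwise.
+ */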
+static int
+cperf_latency_test_verifier(struct rte_mbuf *mbuf,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *vector)
+{
+ const struct rte_mbuf *m;
+ uint32_t len;
+ uint16_t nb_segs;
+ uint8_t *data;
+ uint32_t cipher_offset, auth_offset;
+ uint8_t cipher, auth;
+ int res = 0;
+
+ m = mbuf;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ data = rte_malloc(NULL, len, 0);
+ if (data == NULL)
+ return 1;
+
+ m = mbuf;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
+ m->data_len);
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ switch (options->op_type) {
+ case CPERF_CIPHER_ONLY:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 0;
+ auth_offset = 0;
+ break;
+ case CPERF_CIPHER_THEN_AUTH:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = vector->plaintext.length;
+ break;
+ case CPERF_AUTH_ONLY:
+ cipher = 0;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = vector->plaintext.length;
+ break;
+ case CPERF_AUTH_THEN_CIPHER:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = vector->plaintext.length;
+ break;
+ case CPERF_AEAD:
+ cipher = 1;
+ cipher_offset = vector->aad.length;
+ auth = 1;
+ auth_offset = vector->aad.length + vector->plaintext.length;
+ break;
+ }
+
+ if (cipher == 1) {
+ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ res += memcmp(data + cipher_offset,
+ vector->ciphertext.data,
+ vector->ciphertext.length);
+ else
+ res += memcmp(data + cipher_offset,
+ vector->plaintext.data,
+ vector->plaintext.length);
+ }
+
+ if (auth == 1) {
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ res += memcmp(data + auth_offset,
+ vector->digest.data,
+ vector->digest.length);
+ }
+
+ rte_free(data);
+
+ if (res != 0)
+ res = 1;
+
+ return res;
+}
+
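+/*
+ * Latency measurement loop: a TSC timestamp is taken around each
+ * enqueue/dequeue burst and each op's opaque_data points at a cperf_op_result
+ * slot, so the per-operation cycle count (tsc_end - tsc_start) can be
+ * accumulated once the op has been dequeued.
+ */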
+int
+cperf_latency_test_runner(void *arg)
+{
+ struct cperf_latency_ctx *ctx = arg;
+ struct cperf_op_result *pres;
+
+ if (ctx == NULL)
+ return 0;
+
+ struct rte_crypto_op *ops[ctx->options->burst_sz];
+ struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+ uint64_t ops_enqd = 0, ops_deqd = 0;
+ uint16_t ops_unused = 0;
+ uint64_t m_idx = 0, b_idx = 0, i;
+
+ uint64_t tsc_val, tsc_end, tsc_start;
+ uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
+ uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
+ uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
+
+ uint32_t lcore = rte_lcore_id();
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ struct rte_cryptodev_info dev_info;
+ int linearize = 0;
+
+ /* Check if source mbufs require coalescing */
+ if (ctx->options->segments_nb > 1) {
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ linearize = 1;
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ ctx->lcore_id = lcore;
+
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ while (enqd_tot < ctx->options->total_ops) {
+
+ uint16_t burst_size = ((enqd_tot + ctx->options->burst_sz)
+ <= ctx->options->total_ops) ?
+ ctx->options->burst_sz :
+ ctx->options->total_ops -
+ enqd_tot;
+ uint16_t ops_needed = burst_size - ops_unused;
+
+ /* Allocate crypto ops from pool */
+ if (ops_needed != rte_crypto_op_bulk_alloc(
+ ctx->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops, ops_needed))
+ return -1;
+
+ /* Setup crypto op, attach mbuf etc */
+ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+ &ctx->mbufs_out[m_idx],
+ ops_needed, ctx->sess, ctx->options,
+ ctx->test_vector);
+
+ tsc_start = rte_rdtsc_precise();
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ if (linearize) {
+ /* PMD doesn't support scatter-gather and source buffer
+ * is segmented.
+ * We need to linearize it before enqueuing.
+ */
+ for (i = 0; i < burst_size; i++)
+ rte_pktmbuf_linearize(ops[i]->sym->m_src);
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ ops, burst_size);
+
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, ctx->options->burst_sz);
+
+ tsc_end = rte_rdtsc_precise();
+
+ for (i = 0; i < ops_needed; i++) {
+ ctx->res[tsc_idx].tsc_start = tsc_start;
+ ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+ tsc_idx++;
+ }
+
+ /*
+ * Calculate number of ops not enqueued (mainly for hw
+ * accelerators whose ingress queue can fill up).
+ */
+ ops_unused = burst_size - ops_enqd;
+
+ if (likely(ops_deqd)) {
+ /*
+ * free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ for (i = 0; i < ops_deqd; i++) {
+ pres = (struct cperf_op_result *)
+ (ops_processed[i]->opaque_data);
+ pres->status = ops_processed[i]->status;
+ pres->tsc_end = tsc_end;
+
+ rte_crypto_op_free(ops_processed[i]);
+ }
+
+ deqd_tot += ops_deqd;
+ deqd_max = max(ops_deqd, deqd_max);
+ deqd_min = min(ops_deqd, deqd_min);
+ }
+
+ enqd_tot += ops_enqd;
+ enqd_max = max(ops_enqd, enqd_max);
+ enqd_min = min(ops_enqd, enqd_min);
+
+ m_idx += ops_needed;
+ m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
+ 0 : m_idx;
+ b_idx++;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+ while (deqd_tot < ctx->options->total_ops) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, ctx->options->burst_sz);
+
+ tsc_end = rte_rdtsc_precise();
+
+ if (ops_deqd != 0) {
+ for (i = 0; i < ops_deqd; i++) {
+ pres = (struct cperf_op_result *)
+ (ops_processed[i]->opaque_data);
+ pres->status = ops_processed[i]->status;
+ pres->tsc_end = tsc_end;
+
+ rte_crypto_op_free(ops_processed[i]);
+ }
+
+ deqd_tot += ops_deqd;
+ deqd_max = max(ops_deqd, deqd_max);
+ deqd_min = min(ops_deqd, deqd_min);
+ }
+ }
+
+ for (i = 0; i < tsc_idx; i++) {
+ tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
+ tsc_max = max(tsc_val, tsc_max);
+ tsc_min = min(tsc_val, tsc_min);
+ tsc_tot += tsc_val;
+ }
+
+ if (ctx->options->verify) {
+ struct rte_mbuf **mbufs;
+
+ if (ctx->options->out_of_place == 1)
+ mbufs = ctx->mbufs_out;
+ else
+ mbufs = ctx->mbufs_in;
+
+ for (i = 0; i < ctx->options->total_ops; i++) {
+
+ if (ctx->res[i].status != RTE_CRYPTO_OP_STATUS_SUCCESS
+ || cperf_latency_test_verifier(mbufs[i],
+ ctx->options,
+ ctx->test_vector)) {
+
+ ctx->results.ops_failed++;
+ }
+ }
+ }
+
+ ctx->results.enqd_tot = enqd_tot;
+ ctx->results.enqd_max = enqd_max;
+ ctx->results.enqd_min = enqd_min;
+
+ ctx->results.deqd_tot = deqd_tot;
+ ctx->results.deqd_max = deqd_max;
+ ctx->results.deqd_min = deqd_min;
+
+ ctx->results.cycles_tot = tsc_tot;
+ ctx->results.cycles_max = tsc_max;
+ ctx->results.cycles_min = tsc_min;
+
+ ctx->results.burst_num = b_idx;
+ ctx->results.num = tsc_idx;
+
+ return 0;
+}
+
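+/*
+ * Aggregate the enqueue/dequeue and cycle counts collected by the runner,
+ * convert cycles to microseconds using the TSC frequency, print the results
+ * (CSV or human readable) and free the test context.
+ */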
+void
+cperf_latency_test_destructor(void *arg)
+{
+ struct cperf_latency_ctx *ctx = arg;
+ static int only_once;
+ uint64_t i;
+ uint64_t etot, eavg, emax, emin;
+ uint64_t dtot, davg, dmax, dmin;
+ uint64_t ctot, cavg, cmax, cmin;
+ double ttot, tavg, tmax, tmin;
+
+ if (ctx == NULL)
+ return;
+
+ const uint64_t tunit = 1000000; /* us */
+ const uint64_t tsc_hz = rte_get_tsc_hz();
+
+ etot = ctx->results.enqd_tot;
+ eavg = ctx->results.enqd_tot / ctx->results.burst_num;
+ emax = ctx->results.enqd_max;
+ emin = ctx->results.enqd_min;
+
+ dtot = ctx->results.deqd_tot;
+ davg = ctx->results.deqd_tot / ctx->results.burst_num;
+ dmax = ctx->results.deqd_max;
+ dmin = ctx->results.deqd_min;
+
+ ctot = ctx->results.cycles_tot;
+ cavg = ctx->results.cycles_tot / ctx->results.num;
+ cmax = ctx->results.cycles_max;
+ cmin = ctx->results.cycles_min;
+
+ ttot = tunit*(double)(ctot) / tsc_hz;
+ tavg = tunit*(double)(cavg) / tsc_hz;
+ tmax = tunit*(double)(cmax) / tsc_hz;
+ tmin = tunit*(double)(cmin) / tsc_hz;
+
+ if (ctx->options->csv) {
+ if (!only_once)
+ printf("\n# lcore, Pakt Seq #, Packet Size, cycles,"
+ " time (us)");
+
+ for (i = 0; i < ctx->options->total_ops; i++) {
+
+ printf("\n%u;%"PRIu64";%"PRIu64";%.3f",
+ ctx->lcore_id, i + 1,
+ ctx->res[i].tsc_end - ctx->res[i].tsc_start,
+ tunit * (double) (ctx->res[i].tsc_end
+ - ctx->res[i].tsc_start)
+ / tsc_hz);
+
+ }
+ only_once = 1;
+ } else {
+ printf("\n# Device %d on lcore %u\n", ctx->dev_id,
+ ctx->lcore_id);
+ printf("\n# total operations: %u", ctx->options->total_ops);
+ printf("\n# verified failed: %"PRIu64,
+ ctx->results.ops_failed);
+ printf("\n# burst number: %"PRIu64,
+ ctx->results.burst_num);
+ printf("\n#");
+ printf("\n# \t Total\t Average\t Maximum\t "
+ " Minimum");
+ printf("\n# enqueued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64, etot, eavg, emax, emin);
+ printf("\n# dequeued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64, dtot, davg, dmax, dmin);
+ printf("\n# cycles\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64, ctot, cavg, cmax, cmin);
+ printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f", ttot,
+ tavg, tmax, tmin);
+ printf("\n\n");
+
+ }
+ cperf_latency_test_free(ctx, ctx->options->pool_sz);
+
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_LATENCY_
+#define _CPERF_LATENCY_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "cperf.h"
+#include "cperf_ops.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+void *
+cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *ops_fn);
+
+int
+cperf_latency_test_runner(void *test_ctx);
+
+void
+cperf_latency_test_destructor(void *test_ctx);
+
+#endif /* _CPERF_LATENCY_ */
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#include "cperf_test_throughput.h"
+#include "cperf_ops.h"
+
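+/* Per-lcore statistics filled in by the runner and printed by the destructor */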
+struct cperf_throughput_results {
+ uint64_t ops_enqueued;
+ uint64_t ops_dequeued;
+
+ uint64_t ops_enqueued_failed;
+ uint64_t ops_dequeued_failed;
+
+ uint64_t ops_failed;
+
+ double ops_per_second;
+ double throughput_gbps;
+ double cycles_per_byte;
+};
+
+struct cperf_throughput_ctx {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint8_t lcore_id;
+
+ struct rte_mempool *pkt_mbuf_pool_in;
+ struct rte_mempool *pkt_mbuf_pool_out;
+ struct rte_mbuf **mbufs_in;
+ struct rte_mbuf **mbufs_out;
+
+ struct rte_mempool *crypto_op_pool;
+
+ struct rte_cryptodev_sym_session *sess;
+
+ cperf_populate_ops_t populate_ops;
+ cperf_verify_crypto_op_t verify_op_output;
+
+ const struct cperf_options *options;
+ const struct cperf_test_vector *test_vector;
+ struct cperf_throughput_results results;
+
+};
+
+struct cperf_op_result {
+ enum rte_crypto_op_status status;
+};
+
+static void
+cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
+{
+ uint32_t i;
+
+ if (ctx) {
+ if (ctx->sess)
+ rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+
+ if (ctx->mbufs_in) {
+ for (i = 0; i < mbuf_nb; i++)
+ rte_pktmbuf_free(ctx->mbufs_in[i]);
+
+ rte_free(ctx->mbufs_in);
+ }
+
+ if (ctx->mbufs_out) {
+ for (i = 0; i < mbuf_nb; i++) {
+ if (ctx->mbufs_out[i] != NULL)
+ rte_pktmbuf_free(ctx->mbufs_out[i]);
+ }
+
+ rte_free(ctx->mbufs_out);
+ }
+
+ if (ctx->pkt_mbuf_pool_in)
+ rte_mempool_free(ctx->pkt_mbuf_pool_in);
+
+ if (ctx->pkt_mbuf_pool_out)
+ rte_mempool_free(ctx->pkt_mbuf_pool_out);
+
+ if (ctx->crypto_op_pool)
+ rte_mempool_free(ctx->crypto_op_pool);
+
+ rte_free(ctx);
+ }
+}
+
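+/*
+ * Build an mbuf chain of segments_nb segments, copy the reference plaintext
+ * (or ciphertext when the vector is for decryption) into it, reserve room for
+ * the digest at the tail and, for AEAD, prepend space for the AAD.
+ */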
+static struct rte_mbuf *
+cperf_mbuf_create(struct rte_mempool *mempool,
+ uint32_t segments_nb,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ struct rte_mbuf *mbuf;
+ uint32_t segment_sz = options->buffer_sz / segments_nb;
+ uint32_t last_sz = options->buffer_sz % segments_nb;
+ uint8_t *mbuf_data;
+ uint8_t *test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+
+ mbuf = rte_pktmbuf_alloc(mempool);
+ if (mbuf == NULL)
+ goto error;
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+
+ while (segments_nb) {
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mempool);
+ if (m == NULL)
+ goto error;
+
+ rte_pktmbuf_chain(mbuf, m);
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+ }
+
+ if (last_sz) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, last_sz);
+ }
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
+ options->auth_digest_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ if (options->op_type == CPERF_AEAD) {
+ uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
+ RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+
+ if (aead == NULL)
+ goto error;
+
+ memcpy(aead, test_vector->aad.data, test_vector->aad.length);
+ }
+
+ return mbuf;
+error:
+ if (mbuf != NULL)
+ rte_pktmbuf_free(mbuf);
+
+ return NULL;
+}
+
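+/*
+ * Throughput test constructor: creates the crypto session, the input (and
+ * optional out-of-place output) mbuf pools with pre-built mbufs, and the
+ * crypto op pool used by the runner.
+ */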
+void *
+cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *op_fns)
+{
+ struct cperf_throughput_ctx *ctx = NULL;
+ unsigned int mbuf_idx = 0;
+ char pool_name[32] = "";
+
+ /* zero the context so an early error path can safely free unset fields */
+ ctx = rte_zmalloc(NULL, sizeof(struct cperf_throughput_ctx), 0);
+ if (ctx == NULL)
+ goto err;
+
+ ctx->dev_id = dev_id;
+ ctx->qp_id = qp_id;
+
+ ctx->populate_ops = op_fns->populate_ops;
+ ctx->options = options;
+ ctx->test_vector = test_vector;
+
+ ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ if (ctx->sess == NULL)
+ goto err;
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
+ options->pool_sz * options->segments_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ (options->buffer_sz / options->segments_nb) +
+ (options->buffer_sz % options->segments_nb) +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_in == NULL)
+ goto err;
+
+ /* Pool size must be a multiple of the burst size */
+ if (ctx->options->pool_sz % ctx->options->burst_sz)
+ goto err;
+
+ /* Generate mbufs_in with plaintext populated for test */
+
+ ctx->mbufs_in = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
+ if (ctx->mbufs_in == NULL)
+ goto err;
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_in, options->segments_nb,
+ options, test_vector);
+ if (ctx->mbufs_in[mbuf_idx] == NULL)
+ goto err;
+ }
+
+ if (options->out_of_place == 1) {
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
+ pool_name, options->pool_sz, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ options->buffer_sz +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_out == NULL)
+ goto err;
+ }
+
+ ctx->mbufs_out = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) *
+ ctx->options->pool_sz), 0);
+ if (ctx->mbufs_out == NULL)
+ goto err;
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ if (options->out_of_place == 1) {
+ ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_out, 1,
+ options, test_vector);
+ if (ctx->mbufs_out[mbuf_idx] == NULL)
+ goto err;
+ } else {
+ ctx->mbufs_out[mbuf_idx] = NULL;
+ }
+ }
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
+ dev_id);
+
+ ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
+ rte_socket_id());
+ if (ctx->crypto_op_pool == NULL)
+ goto err;
+
+ return ctx;
+err:
+ cperf_throughput_test_free(ctx, mbuf_idx);
+
+ return NULL;
+}
+
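+/*
+ * Flatten the (possibly segmented) mbuf into a temporary buffer and compare
+ * the cipher and/or auth regions against the expected test vector data.
+ * Returns 0 on match, 1 on mismatch or allocation failure.
+ */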
+static int
+cperf_throughput_test_verifier(struct rte_mbuf *mbuf,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *vector)
+{
+ const struct rte_mbuf *m;
+ uint32_t len;
+ uint16_t nb_segs;
+ uint8_t *data;
+ uint32_t cipher_offset, auth_offset;
+ uint8_t cipher, auth;
+ int res = 0;
+
+ m = mbuf;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ data = rte_malloc(NULL, len, 0);
+ if (data == NULL)
+ return 1;
+
+ m = mbuf;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
+ m->data_len);
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ switch (options->op_type) {
+ case CPERF_CIPHER_ONLY:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 0;
+ auth_offset = 0;
+ break;
+ case CPERF_CIPHER_THEN_AUTH:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = vector->plaintext.length;
+ break;
+ case CPERF_AUTH_ONLY:
+ cipher = 0;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = vector->plaintext.length;
+ break;
+ case CPERF_AUTH_THEN_CIPHER:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = vector->plaintext.length;
+ break;
+ case CPERF_AEAD:
+ cipher = 1;
+ cipher_offset = vector->aad.length;
+ auth = 1;
+ auth_offset = vector->aad.length + vector->plaintext.length;
+ break;
+ break;
+ default:
+ rte_free(data);
+ return 1;
+ }
+
+ if (cipher == 1) {
+ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ res += memcmp(data + cipher_offset,
+ vector->ciphertext.data,
+ vector->ciphertext.length);
+ else
+ res += memcmp(data + cipher_offset,
+ vector->plaintext.data,
+ vector->plaintext.length);
+ }
+
+ if (auth == 1) {
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ res += memcmp(data + auth_offset,
+ vector->digest.data,
+ vector->digest.length);
+ }
+
+ rte_free(data);
+
+ if (res != 0)
+ res = 1;
+
+ return res;
+}
+
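+/*
+ * Throughput test runner, executed per worker lcore. Bursts of pre-built
+ * crypto ops are enqueued and dequeued until total_ops operations complete;
+ * ops/s, Gbps and cycles/byte are then derived from the elapsed TSC count.
+ */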
+int
+cperf_throughput_test_runner(void *test_ctx)
+{
+ struct cperf_throughput_ctx *ctx = test_ctx;
+ struct cperf_op_result *res = NULL, *pres;
+
+ if (ctx->options->verify) {
+ res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
+ ctx->options->total_ops, 0);
+ if (res == NULL)
+ return 0;
+ }
+
+ uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
+ uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+
+ uint64_t i, m_idx = 0, tsc_start, tsc_end, tsc_duration;
+
+ uint16_t ops_unused = 0;
+ uint64_t idx = 0;
+
+ struct rte_crypto_op *ops[ctx->options->burst_sz];
+ struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+
+ uint32_t lcore = rte_lcore_id();
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ struct rte_cryptodev_info dev_info;
+ int linearize = 0;
+
+ /* Check if source mbufs require coalescing */
+ if (ctx->options->segments_nb > 1) {
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ linearize = 1;
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ ctx->lcore_id = lcore;
+
+ if (!ctx->options->csv)
+ printf("\n# Running throughput test on device: %u, lcore: %u\n",
+ ctx->dev_id, lcore);
+
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ tsc_start = rte_rdtsc_precise();
+
+ while (ops_enqd_total < ctx->options->total_ops) {
+
+ uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
+ <= ctx->options->total_ops) ?
+ ctx->options->burst_sz :
+ ctx->options->total_ops -
+ ops_enqd_total;
+
+ uint16_t ops_needed = burst_size - ops_unused;
+
+ /* Allocate crypto ops from pool */
+ if (ops_needed != rte_crypto_op_bulk_alloc(
+ ctx->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops, ops_needed))
+ return -1;
+
+ /* Setup crypto op, attach mbuf etc */
+ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+ &ctx->mbufs_out[m_idx],
+ ops_needed, ctx->sess, ctx->options,
+ ctx->test_vector);
+
+ if (ctx->options->verify) {
+ for (i = 0; i < ops_needed; i++) {
+ ops[i]->opaque_data = (void *)&res[idx];
+ idx++;
+ }
+ }
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ if (linearize) {
+ /* PMD doesn't support scatter-gather and source buffer
+ * is segmented.
+ * We need to linearize it before enqueuing.
+ */
+ for (i = 0; i < burst_size; i++)
+ rte_pktmbuf_linearize(ops[i]->sym->m_src);
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ ops, burst_size);
+ if (ops_enqd < burst_size)
+ ops_enqd_failed++;
+
+ /**
+ * Calculate number of ops not enqueued (mainly for hw
+ * accelerators whose ingress queue can fill up).
+ */
+ ops_unused = burst_size - ops_enqd;
+ ops_enqd_total += ops_enqd;
+
+
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, ctx->options->burst_sz);
+
+ if (likely(ops_deqd)) {
+
+ if (ctx->options->verify) {
+ void *opq;
+ for (i = 0; i < ops_deqd; i++) {
+ opq = (ops_processed[i]->opaque_data);
+ pres = (struct cperf_op_result *)opq;
+ pres->status = ops_processed[i]->status;
+ }
+ }
+
+ /* free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ for (i = 0; i < ops_deqd; i++)
+ rte_crypto_op_free(ops_processed[i]);
+
+ ops_deqd_total += ops_deqd;
+ } else {
+ /**
+ * Count dequeue polls which didn't return any
+ * processed operations. This statistic is mainly
+ * relevant to hw accelerators.
+ */
+ ops_deqd_failed++;
+ }
+
+ m_idx += ops_needed;
+ m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
+ 0 : m_idx;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+
+ while (ops_deqd_total < ctx->options->total_ops) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, ctx->options->burst_sz);
+ if (ops_deqd == 0)
+ ops_deqd_failed++;
+ else {
+ if (ctx->options->verify) {
+ void *opq;
+ for (i = 0; i < ops_deqd; i++) {
+ opq = (ops_processed[i]->opaque_data);
+ pres = (struct cperf_op_result *)opq;
+ pres->status = ops_processed[i]->status;
+ }
+ }
+
+ for (i = 0; i < ops_deqd; i++)
+ rte_crypto_op_free(ops_processed[i]);
+
+ ops_deqd_total += ops_deqd;
+ }
+ }
+
+ tsc_end = rte_rdtsc_precise();
+ tsc_duration = (tsc_end - tsc_start);
+
+ if (ctx->options->verify) {
+ struct rte_mbuf **mbufs;
+
+ if (ctx->options->out_of_place == 1)
+ mbufs = ctx->mbufs_out;
+ else
+ mbufs = ctx->mbufs_in;
+
+ for (i = 0; i < ctx->options->total_ops; i++) {
+
+ if (res[i].status != RTE_CRYPTO_OP_STATUS_SUCCESS ||
+ cperf_throughput_test_verifier(
+ mbufs[i], ctx->options,
+ ctx->test_vector)) {
+
+ ctx->results.ops_failed++;
+ }
+ }
+
+ rte_free(res);
+ }
+
+ /* Calculate average operations processed per second */
+ ctx->results.ops_per_second = ((double)ctx->options->total_ops /
+ tsc_duration) * rte_get_tsc_hz();
+
+ /* Calculate average throughput (Gbps) in bits per second */
+ ctx->results.throughput_gbps = ((ctx->results.ops_per_second *
+ ctx->options->buffer_sz * 8) / 1000000000);
+
+
+ /* Calculate average cycles per byte */
+ ctx->results.cycles_per_byte = ((double)tsc_duration /
+ ctx->options->total_ops) / ctx->options->buffer_sz;
+
+ ctx->results.ops_enqueued = ops_enqd_total;
+ ctx->results.ops_dequeued = ops_deqd_total;
+
+ ctx->results.ops_enqueued_failed = ops_enqd_failed;
+ ctx->results.ops_dequeued_failed = ops_deqd_failed;
+
+ return 0;
+}
+
+
+
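+/* Print the per-lcore throughput results (CSV or plain text) and free the context */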
+void
+cperf_throughput_test_destructor(void *arg)
+{
+ struct cperf_throughput_ctx *ctx = arg;
+ struct cperf_throughput_results *results;
+ static int only_once;
+
+ if (ctx == NULL)
+ return;
+
+ results = &ctx->results;
+
+ if (!ctx->options->csv) {
+ printf("\n# Device %d on lcore %u\n",
+ ctx->dev_id, ctx->lcore_id);
+ printf("# Buffer Size(B)\t Enqueued\t Dequeued\tFailed Enq"
+ "\tFailed Deq\tOps(Millions)\tThroughput(Gbps)"
+ "\tCycles Per Byte\n");
+
+ printf("\n%16u\t%10"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64"\t%16.4f\t%16.4f\t%15.2f\n",
+ ctx->options->buffer_sz,
+ results->ops_enqueued,
+ results->ops_dequeued,
+ results->ops_enqueued_failed,
+ results->ops_dequeued_failed,
+ results->ops_per_second/1000000,
+ results->throughput_gbps,
+ results->cycles_per_byte);
+ } else {
+ if (!only_once)
+ printf("\n# CPU lcore id, Burst Size(B), "
+ "Buffer Size(B),Enqueued,Dequeued,Failed Enq,"
+ "Failed Deq,Ops(Millions),Throughput(Gbps),"
+ "Cycles Per Byte\n");
+ only_once = 1;
+
+ printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%.f3;%.f3;%.f3\n",
+ ctx->lcore_id,
+ ctx->options->burst_sz,
+ ctx->options->buffer_sz,
+ results->ops_enqueued,
+ results->ops_dequeued,
+ results->ops_enqueued_failed,
+ results->ops_dequeued_failed,
+ results->ops_per_second/1000000,
+ results->throughput_gbps,
+ results->cycles_per_byte);
+ }
+
+ cperf_throughput_test_free(ctx, ctx->options->pool_sz);
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_THROUGHPUT_
+#define _CPERF_THROUGHPUT_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "cperf.h"
+#include "cperf_ops.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+
+void *
+cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *ops_fn);
+
+int
+cperf_throughput_test_runner(void *test_ctx);
+
+void
+cperf_throughput_test_destructor(void *test_ctx);
+
+#endif /* _CPERF_THROUGHPUT_ */
--- /dev/null
+#include <stdio.h>
+
+#include <rte_malloc.h>
+
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+#include "cperf_test_vector_parsing.h"
+
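+/*
+ * Free a test vector; buffers parsed from a test file (plaintext, keys,
+ * ciphertext) are only released when a test file was supplied.
+ */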
+int
+free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts)
+{
+ if (vector == NULL || opts == NULL)
+ return -1;
+
+ rte_free(vector->iv.data);
+ rte_free(vector->aad.data);
+ rte_free(vector->digest.data);
+
+ if (opts->test_file != NULL) {
+ rte_free(vector->plaintext.data);
+ rte_free(vector->cipher_key.data);
+ rte_free(vector->auth_key.data);
+ rte_free(vector->ciphertext.data);
+ }
+
+ rte_free(vector);
+
+ return 0;
+}
+
+void
+show_test_vector(struct cperf_test_vector *test_vector)
+{
+ const uint8_t wrap = 32;
+ uint32_t i;
+
+ if (test_vector == NULL)
+ return;
+
+ if (test_vector->plaintext.data) {
+ printf("\nplaintext =\n");
+ for (i = 0; i < test_vector->plaintext.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == test_vector->plaintext.length - 1)
+ printf("0x%02x",
+ test_vector->plaintext.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->plaintext.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->cipher_key.data) {
+ printf("\ncipher_key =\n");
+ for (i = 0; i < test_vector->cipher_key.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->cipher_key.length - 1))
+ printf("0x%02x",
+ test_vector->cipher_key.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->cipher_key.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->auth_key.data) {
+ printf("\nauth_key =\n");
+ for (i = 0; i < test_vector->auth_key.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->auth_key.length - 1))
+ printf("0x%02x", test_vector->auth_key.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->auth_key.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->iv.data) {
+ printf("\niv =\n");
+ for (i = 0; i < test_vector->iv.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->iv.length - 1))
+ printf("0x%02x", test_vector->iv.data[i]);
+ else
+ printf("0x%02x, ", test_vector->iv.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->ciphertext.data) {
+ printf("\nciphertext =\n");
+ for (i = 0; i < test_vector->ciphertext.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == test_vector->ciphertext.length - 1)
+ printf("0x%02x",
+ test_vector->ciphertext.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->ciphertext.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->aad.data) {
+ printf("\naad =\n");
+ for (i = 0; i < test_vector->aad.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->aad.length - 1))
+ printf("0x%02x", test_vector->aad.data[i]);
+ else
+ printf("0x%02x, ", test_vector->aad.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->digest.data) {
+ printf("\ndigest =\n");
+ for (i = 0; i < test_vector->digest.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->digest.length - 1))
+ printf("0x%02x", test_vector->digest.data[i]);
+ else
+ printf("0x%02x, ", test_vector->digest.data[i]);
+ }
+ printf("\n");
+ }
+}
+
+/* trim leading and trailing spaces */
+static char *
+trim_space(char *str)
+{
+ char *start, *end;
+
+ for (start = str; *start; start++) {
+ if (!isspace((unsigned char) start[0]))
+ break;
+ }
+
+ for (end = start + strlen(start); end > start + 1; end--) {
+ if (!isspace((unsigned char) end[-1]))
+ break;
+ }
+
+ *end = 0;
+
+ /* Shift from "start" to the beginning of the string */
+ if (start > str)
+ memmove(str, start, (end - start) + 1);
+
+ return str;
+}
+
+/* tokenize test values separated by commas */
+static int
+parse_values(char *tokens, uint8_t **data, uint32_t *data_length)
+{
+ uint32_t n_tokens;
+ uint32_t data_size = 32;
+
+ uint8_t *values, *values_resized;
+ char *tok, *error = NULL;
+
+ tok = strtok(tokens, CPERF_VALUE_DELIMITER);
+ if (tok == NULL)
+ return -1;
+
+ values = (uint8_t *) rte_zmalloc(NULL, sizeof(uint8_t) * data_size, 0);
+ if (values == NULL)
+ return -1;
+
+ n_tokens = 0;
+ while (tok != NULL) {
+ values_resized = NULL;
+
+ if (n_tokens >= data_size) {
+ data_size *= 2;
+
+ values_resized = (uint8_t *) rte_realloc(values,
+ sizeof(uint8_t) * data_size, 0);
+ if (values_resized == NULL) {
+ rte_free(values);
+ return -1;
+ }
+ values = values_resized;
+ }
+
+ values[n_tokens] = (uint8_t) strtoul(tok, &error, 0);
+ if ((error == NULL) || (*error != '\0')) {
+ printf("Failed with convert '%s'\n", tok);
+ rte_free(values);
+ return -1;
+ }
+
+ tok = strtok(NULL, CPERF_VALUE_DELIMITER);
+ if (tok == NULL)
+ break;
+
+ n_tokens++;
+ }
+
+ values_resized = (uint8_t *) rte_realloc(values,
+ sizeof(uint8_t) * (n_tokens + 1), 0);
+
+ if (values_resized == NULL) {
+ rte_free(values);
+ return -1;
+ }
+
+ *data = values_resized;
+ *data_length = n_tokens + 1;
+
+ return 0;
+}
+
+/* checks the type of key and assigns data */
+static int
+parse_entry(char *entry, struct cperf_test_vector *vector,
+ struct cperf_options *opts, uint8_t tc_found)
+{
+ int status;
+ uint32_t data_length;
+
+ uint8_t *data = NULL;
+ char *token, *key_token;
+
+ /* get key */
+ token = strtok(entry, CPERF_ENTRY_DELIMITER);
+ key_token = token;
+
+ /* get values for key */
+ token = strtok(NULL, CPERF_ENTRY_DELIMITER);
+ if (token == NULL) {
+ printf("Expected 'key = values' but was '%.40s'..\n",
+ key_token);
+ return -1;
+ }
+
+ status = parse_values(token, &data, &data_length);
+ if (status)
+ return -1;
+
+ /* compare keys */
+ if (strstr(key_token, "plaintext")) {
+ rte_free(vector->plaintext.data);
+ vector->plaintext.data = data;
+ if (tc_found)
+ vector->plaintext.length = data_length;
+ else {
+ if (opts->buffer_sz > data_length) {
+ printf("Global plaintext shorter than "
+ "buffer_sz\n");
+ return -1;
+ }
+ vector->plaintext.length = opts->buffer_sz;
+ }
+
+ } else if (strstr(key_token, "cipher_key")) {
+ rte_free(vector->cipher_key.data);
+ vector->cipher_key.data = data;
+ if (tc_found)
+ vector->cipher_key.length = data_length;
+ else {
+ if (opts->cipher_key_sz > data_length) {
+ printf("Global cipher_key shorter than "
+ "cipher_key_sz\n");
+ return -1;
+ }
+ vector->cipher_key.length = opts->cipher_key_sz;
+ }
+
+ } else if (strstr(key_token, "auth_key")) {
+ rte_free(vector->auth_key.data);
+ vector->auth_key.data = data;
+ if (tc_found)
+ vector->auth_key.length = data_length;
+ else {
+ if (opts->auth_key_sz > data_length) {
+ printf("Global auth_key shorter than "
+ "auth_key_sz\n");
+ return -1;
+ }
+ vector->auth_key.length = opts->auth_key_sz;
+ }
+
+ } else if (strstr(key_token, "iv")) {
+ rte_free(vector->iv.data);
+ vector->iv.data = data;
+ vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
+ if (tc_found)
+ vector->iv.length = data_length;
+ else {
+ if (opts->cipher_iv_sz > data_length) {
+ printf("Global iv shorter than "
+ "cipher_iv_sz\n");
+ return -1;
+ }
+ vector->iv.length = opts->cipher_iv_sz;
+ }
+
+ } else if (strstr(key_token, "ciphertext")) {
+ rte_free(vector->ciphertext.data);
+ vector->ciphertext.data = data;
+ if (tc_found)
+ vector->ciphertext.length = data_length;
+ else {
+ if (opts->buffer_sz > data_length) {
+ printf("Global ciphertext shorter than "
+ "buffer_sz\n");
+ return -1;
+ }
+ vector->ciphertext.length = opts->buffer_sz;
+ }
+
+ } else if (strstr(key_token, "aad")) {
+ rte_free(vector->aad.data);
+ vector->aad.data = data;
+ vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+ if (tc_found)
+ vector->aad.length = data_length;
+ else {
+ if (opts->auth_aad_sz > data_length) {
+ printf("Global aad shorter than "
+ "auth_aad_sz\n");
+ return -1;
+ }
+ vector->aad.length = opts->auth_aad_sz;
+ }
+
+ } else if (strstr(key_token, "digest")) {
+ rte_free(vector->digest.data);
+ vector->digest.data = data;
+ vector->digest.phys_addr = rte_malloc_virt2phy(
+ vector->digest.data);
+ if (tc_found)
+ vector->digest.length = data_length;
+ else {
+ if (opts->auth_digest_sz > data_length) {
+ printf("Global digest shorter than "
+ "auth_digest_sz\n");
+ return -1;
+ }
+ vector->digest.length = opts->auth_digest_sz;
+ }
+ } else {
+ printf("Not valid key: '%s'\n", trim_space(key_token));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* searches in the file for test keys and values */
+static int
+parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
+{
+ uint8_t tc_found = 0;
+ uint8_t tc_data_start = 0;
+ ssize_t read;
+ size_t len = 0;
+ int status = 0;
+
+ FILE *fp;
+ char *line = NULL;
+ char *entry = NULL;
+
+ fp = fopen(opts->test_file, "r");
+ if (fp == NULL) {
+ printf("File %s does not exists\n", opts->test_file);
+ return -1;
+ }
+
+ while ((read = getline(&line, &len, fp)) != -1) {
+
+ /* ignore comments and new lines */
+ if (line[0] == '#' || line[0] == '/' || line[0] == '\n'
+ || line[0] == '\r' || line[0] == ' ')
+ continue;
+
+ trim_space(line);
+
+ /* next test case is started */
+ if (line[0] == '[' && line[strlen(line) - 1] == ']' && tc_found)
+ break;
+ /* test case section started, end of global data */
+ else if (line[0] == '[' && line[strlen(line) - 1] == ']')
+ tc_data_start = 1;
+
+ /* test name unspecified, end after global data */
+ if (tc_data_start && opts->test_name == NULL)
+ break;
+ /* searching for a suitable test */
+ else if (tc_data_start && tc_found == 0) {
+ if (!strcmp(line, opts->test_name)) {
+ tc_found = 1;
+ continue;
+ } else
+ continue;
+ }
+
+ /* buffer for multiline */
+ entry = (char *) rte_realloc(entry,
+ sizeof(char) * strlen(line) + 1, 0);
+ if (entry == NULL)
+ goto err;
+
+ memset(entry, 0, strlen(line) + 1);
+ strncpy(entry, line, strlen(line));
+
+ /* check if entry ends with , or = */
+ if (entry[strlen(entry) - 1] == ','
+ || entry[strlen(entry) - 1] == '=') {
+ while ((read = getline(&line, &len, fp)) != -1) {
+ trim_space(line);
+
+ /* extend entry about length of new line */
+ char *entry_extended = (char *) rte_realloc(
+ entry, sizeof(char)
+ * (strlen(line) + strlen(entry))
+ + 1, 0);
+
+ if (entry_extended == NULL)
+ goto err;
+ entry = entry_extended;
+
+ strncat(entry, line, strlen(line));
+
+ if (entry[strlen(entry) - 1] != ',')
+ break;
+ }
+ }
+ status = parse_entry(entry, vector, opts, tc_found);
+ if (status) {
+ printf("An error occurred while parsing!\n");
+ goto err;
+ }
+ }
+
+ if (tc_found == 0 && opts->test_name != NULL) {
+ printf("Not found '%s' case in test file\n", opts->test_name);
+ goto err;
+ }
+
+ fclose(fp);
+ free(line);
+ rte_free(entry);
+
+ return 0;
+
+err:
+ if (fp)
+ fclose(fp);
+ if (line)
+ free(line);
+ if (entry)
+ rte_free(entry);
+
+ return -1;
+}
+
+struct cperf_test_vector*
+cperf_test_vector_get_from_file(struct cperf_options *opts)
+{
+ int status;
+ struct cperf_test_vector *test_vector = NULL;
+
+ if (opts == NULL || opts->test_file == NULL)
+ return test_vector;
+
+ test_vector = (struct cperf_test_vector *) rte_zmalloc(NULL,
+ sizeof(struct cperf_test_vector), 0);
+ if (test_vector == NULL)
+ return test_vector;
+
+ /* filling the vector with data from a file */
+ status = parse_file(test_vector, opts);
+ if (status) {
+ free_test_vector(test_vector, opts);
+ return NULL;
+ }
+
+ /* other values not included in the file */
+ test_vector->data.cipher_offset = 0;
+ test_vector->data.cipher_length = opts->buffer_sz;
+
+ test_vector->data.auth_offset = 0;
+ test_vector->data.auth_length = opts->buffer_sz;
+
+ return test_vector;
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef APP_CRYPTO_PERF_CPERF_TEST_VECTOR_PARSING_H_
+#define APP_CRYPTO_PERF_CPERF_TEST_VECTOR_PARSING_H_
+
+#define CPERF_VALUE_DELIMITER ","
+#define CPERF_ENTRY_DELIMITER "="
+
+/**
+ * Frees the allocated memory for test vector
+ *
+ * @param vector
+ * Destination vector test to release
+ * @param opts
+ * Test options
+ * @return
+ * 0 on success, (-1) on error.
+ */
+int
+free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts);
+
+/**
+ * Displays data in test vector
+ *
+ * @param vector
+ * Vector to display
+ */
+void
+show_test_vector(struct cperf_test_vector *test_vector);
+
+/**
+ * Completes test vector with data from file
+ *
+ * @param opts
+ * Test options
+ * @return
+ * NULL on error.
+ * Test vector pointer on success.
+ */
+struct cperf_test_vector*
+cperf_test_vector_get_from_file(struct cperf_options *opts);
+
+#endif /* APP_CRYPTO_PERF_CPERF_TEST_VECTOR_PARSING_H_ */
--- /dev/null
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "cperf_test_vectors.h"
+
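+/* sample plain text, 2048 bytes */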
+uint8_t plaintext[2048] = {
+ 0x71, 0x75, 0x83, 0x98, 0x75, 0x42, 0x51, 0x09, 0x94, 0x02, 0x13, 0x20,
+ 0x15, 0x64, 0x46, 0x32, 0x08, 0x18, 0x91, 0x82, 0x86, 0x52, 0x23, 0x93,
+ 0x44, 0x54, 0x28, 0x68, 0x78, 0x78, 0x70, 0x06, 0x42, 0x74, 0x41, 0x27,
+ 0x73, 0x38, 0x53, 0x77, 0x51, 0x96, 0x53, 0x24, 0x03, 0x88, 0x74, 0x14,
+ 0x70, 0x23, 0x88, 0x30, 0x85, 0x18, 0x89, 0x27, 0x41, 0x71, 0x61, 0x23,
+ 0x04, 0x83, 0x30, 0x57, 0x26, 0x47, 0x23, 0x75, 0x25, 0x62, 0x53, 0x80,
+ 0x38, 0x34, 0x21, 0x33, 0x34, 0x51, 0x46, 0x29, 0x94, 0x64, 0x22, 0x67,
+ 0x25, 0x45, 0x70, 0x26, 0x74, 0x39, 0x46, 0x71, 0x08, 0x85, 0x27, 0x18,
+ 0x93, 0x39, 0x72, 0x11, 0x57, 0x26, 0x88, 0x46, 0x47, 0x49, 0x86, 0x92,
+ 0x03, 0x37, 0x96, 0x40, 0x84, 0x53, 0x67, 0x47, 0x60, 0x60, 0x37, 0x67,
+ 0x02, 0x68, 0x76, 0x62, 0x42, 0x01, 0x59, 0x11, 0x01, 0x89, 0x40, 0x87,
+ 0x58, 0x20, 0x51, 0x21, 0x66, 0x26, 0x26, 0x73, 0x03, 0x06, 0x14, 0x25,
+ 0x98, 0x42, 0x44, 0x67, 0x24, 0x78, 0x71, 0x45, 0x32, 0x61, 0x20, 0x26,
+ 0x08, 0x88, 0x44, 0x26, 0x40, 0x63, 0x76, 0x23, 0x78, 0x55, 0x81, 0x97,
+ 0x95, 0x89, 0x39, 0x07, 0x14, 0x50, 0x50, 0x73, 0x07, 0x20, 0x86, 0x83,
+ 0x74, 0x57, 0x72, 0x36, 0x68, 0x61, 0x14, 0x41, 0x56, 0x49, 0x64, 0x72,
+ 0x75, 0x81, 0x47, 0x91, 0x08, 0x76, 0x47, 0x06, 0x55, 0x77, 0x61, 0x45,
+ 0x50, 0x10, 0x07, 0x46, 0x46, 0x89, 0x80, 0x07, 0x24, 0x95, 0x39, 0x43,
+ 0x03, 0x75, 0x24, 0x35, 0x57, 0x82, 0x09, 0x64, 0x29, 0x24, 0x26, 0x66,
+ 0x67, 0x29, 0x05, 0x90, 0x82, 0x02, 0x45, 0x71, 0x21, 0x34, 0x25, 0x48,
+ 0x68, 0x26, 0x01, 0x18, 0x73, 0x18, 0x46, 0x15, 0x14, 0x33, 0x28, 0x44,
+ 0x24, 0x82, 0x20, 0x12, 0x99, 0x43, 0x68, 0x43, 0x25, 0x14, 0x34, 0x33,
+ 0x31, 0x13, 0x77, 0x44, 0x95, 0x22, 0x99, 0x02, 0x30, 0x50, 0x74, 0x43,
+ 0x81, 0x78, 0x32, 0x17, 0x09, 0x85, 0x04, 0x37, 0x31, 0x98, 0x76, 0x79,
+ 0x64, 0x10, 0x39, 0x89, 0x59, 0x90, 0x50, 0x15, 0x77, 0x39, 0x28, 0x14,
+ 0x30, 0x19, 0x68, 0x77, 0x89, 0x48, 0x86, 0x16, 0x11, 0x33, 0x84, 0x56,
+ 0x10, 0x20, 0x94, 0x72, 0x41, 0x69, 0x13, 0x00, 0x56, 0x27, 0x01, 0x57,
+ 0x46, 0x65, 0x65, 0x19, 0x33, 0x07, 0x62, 0x19, 0x91, 0x60, 0x29, 0x11,
+ 0x41, 0x25, 0x88, 0x21, 0x93, 0x85, 0x87, 0x40, 0x91, 0x25, 0x32, 0x86,
+ 0x76, 0x54, 0x92, 0x52, 0x72, 0x46, 0x61, 0x84, 0x20, 0x14, 0x65, 0x83,
+ 0x69, 0x90, 0x80, 0x11, 0x35, 0x70, 0x42, 0x64, 0x74, 0x85, 0x15, 0x23,
+ 0x06, 0x55, 0x67, 0x49, 0x76, 0x47, 0x11, 0x95, 0x00, 0x85, 0x05, 0x12,
+ 0x58, 0x53, 0x25, 0x73, 0x62, 0x81, 0x63, 0x82, 0x32, 0x75, 0x16, 0x48,
+ 0x04, 0x96, 0x75, 0x16, 0x43, 0x83, 0x41, 0x85, 0x95, 0x67, 0x27, 0x83,
+ 0x22, 0x43, 0x02, 0x27, 0x69, 0x62, 0x78, 0x50, 0x57, 0x66, 0x99, 0x89,
+ 0x05, 0x06, 0x35, 0x86, 0x37, 0x27, 0x48, 0x46, 0x50, 0x80, 0x96, 0x40,
+ 0x42, 0x36, 0x21, 0x54, 0x49, 0x18, 0x63, 0x38, 0x45, 0x76, 0x23, 0x20,
+ 0x28, 0x06, 0x17, 0x32, 0x58, 0x50, 0x49, 0x54, 0x29, 0x46, 0x18, 0x12,
+ 0x17, 0x50, 0x02, 0x80, 0x99, 0x53, 0x15, 0x02, 0x07, 0x14, 0x19, 0x60,
+ 0x56, 0x43, 0x76, 0x71, 0x49, 0x99, 0x54, 0x83, 0x28, 0x94, 0x30, 0x30,
+ 0x57, 0x05, 0x89, 0x80, 0x11, 0x03, 0x78, 0x35, 0x73, 0x52, 0x67, 0x39,
+ 0x67, 0x07, 0x04, 0x49, 0x23, 0x83, 0x86, 0x89, 0x57, 0x71, 0x08, 0x41,
+ 0x15, 0x97, 0x19, 0x72, 0x03, 0x27, 0x72, 0x52, 0x66, 0x67, 0x99, 0x15,
+ 0x33, 0x64, 0x69, 0x78, 0x07, 0x83, 0x53, 0x71, 0x21, 0x50, 0x05, 0x48,
+ 0x59, 0x85, 0x01, 0x36, 0x65, 0x02, 0x52, 0x01, 0x09, 0x49, 0x28, 0x77,
+ 0x25, 0x35, 0x67, 0x77, 0x81, 0x64, 0x24, 0x29, 0x42, 0x32, 0x59, 0x22,
+ 0x93, 0x48, 0x59, 0x03, 0x85, 0x87, 0x15, 0x55, 0x23, 0x42, 0x58, 0x17,
+ 0x18, 0x37, 0x70, 0x83, 0x80, 0x12, 0x44, 0x83, 0x45, 0x70, 0x55, 0x86,
+ 0x03, 0x23, 0x01, 0x56, 0x94, 0x12, 0x41, 0x34, 0x82, 0x90, 0x83, 0x46,
+ 0x17, 0x56, 0x66, 0x96, 0x75, 0x80, 0x59, 0x07, 0x15, 0x84, 0x19, 0x52,
+ 0x37, 0x44, 0x44, 0x83, 0x72, 0x43, 0x25, 0x42, 0x26, 0x86, 0x87, 0x86,
+ 0x91, 0x62, 0x14, 0x90, 0x34, 0x26, 0x14, 0x33, 0x59, 0x70, 0x73, 0x15,
+ 0x49, 0x40, 0x66, 0x88, 0x42, 0x66, 0x16, 0x42, 0x55, 0x92, 0x82, 0x06,
+ 0x20, 0x96, 0x36, 0x96, 0x13, 0x07, 0x84, 0x94, 0x37, 0x66, 0x62, 0x78,
+ 0x60, 0x58, 0x80, 0x50, 0x69, 0x03, 0x97, 0x16, 0x64, 0x45, 0x21, 0x39,
+ 0x79, 0x28, 0x52, 0x17, 0x14, 0x77, 0x31, 0x60, 0x86, 0x70, 0x09, 0x53,
+ 0x39, 0x32, 0x52, 0x31, 0x35, 0x79, 0x24, 0x70, 0x25, 0x48, 0x23, 0x49,
+ 0x10, 0x64, 0x54, 0x30, 0x82, 0x34, 0x51, 0x20, 0x46, 0x04, 0x29, 0x25,
+ 0x65, 0x09, 0x55, 0x30, 0x30, 0x52, 0x85, 0x32, 0x79, 0x19, 0x59, 0x07,
+ 0x05, 0x12, 0x11, 0x03, 0x21, 0x90, 0x36, 0x62, 0x23, 0x67, 0x36, 0x67,
+ 0x47, 0x39, 0x92, 0x88, 0x45, 0x43, 0x71, 0x16, 0x48, 0x27, 0x68, 0x39,
+ 0x98, 0x38, 0x03, 0x31, 0x85, 0x10, 0x06, 0x95, 0x54, 0x79, 0x28, 0x79,
+ 0x56, 0x16, 0x65, 0x69, 0x00, 0x54, 0x09, 0x91, 0x06, 0x10, 0x10, 0x86,
+ 0x75, 0x01, 0x02, 0x71, 0x01, 0x09, 0x32, 0x94, 0x66, 0x43, 0x68, 0x36,
+ 0x19, 0x52, 0x02, 0x04, 0x45, 0x49, 0x40, 0x94, 0x07, 0x87, 0x86, 0x79,
+ 0x84, 0x07, 0x75, 0x30, 0x73, 0x02, 0x57, 0x81, 0x65, 0x02, 0x28, 0x96,
+ 0x57, 0x07, 0x70, 0x34, 0x39, 0x35, 0x75, 0x19, 0x47, 0x57, 0x08, 0x75,
+ 0x86, 0x57, 0x11, 0x32, 0x09, 0x47, 0x83, 0x93, 0x20, 0x94, 0x90, 0x88,
+ 0x39, 0x63, 0x22, 0x88, 0x54, 0x54, 0x95, 0x75, 0x67, 0x26, 0x02, 0x49,
+ 0x26, 0x17, 0x35, 0x16, 0x27, 0x65, 0x64, 0x26, 0x93, 0x92, 0x77, 0x85,
+ 0x84, 0x40, 0x59, 0x29, 0x49, 0x69, 0x94, 0x71, 0x72, 0x21, 0x55, 0x03,
+ 0x19, 0x74, 0x09, 0x40, 0x57, 0x68, 0x41, 0x19, 0x11, 0x21, 0x63, 0x56,
+ 0x29, 0x77, 0x57, 0x81, 0x44, 0x40, 0x76, 0x77, 0x02, 0x71, 0x66, 0x35,
+ 0x89, 0x02, 0x64, 0x51, 0x61, 0x02, 0x46, 0x91, 0x38, 0x93, 0x62, 0x57,
+ 0x18, 0x98, 0x12, 0x87, 0x29, 0x48, 0x65, 0x39, 0x99, 0x45, 0x54, 0x69,
+ 0x51, 0x16, 0x25, 0x75, 0x60, 0x70, 0x33, 0x72, 0x01, 0x60, 0x26, 0x51,
+ 0x44, 0x14, 0x39, 0x12, 0x95, 0x48, 0x87, 0x33, 0x90, 0x16, 0x42, 0x78,
+ 0x48, 0x58, 0x96, 0x93, 0x75, 0x23, 0x07, 0x13, 0x86, 0x07, 0x96, 0x30,
+ 0x22, 0x82, 0x91, 0x36, 0x72, 0x16, 0x48, 0x77, 0x64, 0x99, 0x07, 0x34,
+ 0x78, 0x60, 0x61, 0x13, 0x48, 0x93, 0x46, 0x62, 0x48, 0x38, 0x37, 0x96,
+ 0x58, 0x64, 0x39, 0x90, 0x69, 0x46, 0x81, 0x98, 0x61, 0x89, 0x15, 0x59,
+ 0x78, 0x98, 0x21, 0x34, 0x00, 0x69, 0x97, 0x80, 0x28, 0x81, 0x53, 0x49,
+ 0x79, 0x53, 0x92, 0x20, 0x29, 0x40, 0x70, 0x06, 0x09, 0x55, 0x99, 0x41,
+ 0x51, 0x35, 0x55, 0x27, 0x39, 0x06, 0x29, 0x83, 0x66, 0x03, 0x68, 0x14,
+ 0x11, 0x69, 0x95, 0x51, 0x71, 0x55, 0x24, 0x60, 0x52, 0x58, 0x88, 0x11,
+ 0x88, 0x25, 0x37, 0x86, 0x01, 0x52, 0x93, 0x52, 0x02, 0x24, 0x91, 0x58,
+ 0x56, 0x37, 0x50, 0x88, 0x39, 0x09, 0x61, 0x19, 0x08, 0x86, 0x29, 0x51,
+ 0x63, 0x38, 0x81, 0x14, 0x75, 0x75, 0x39, 0x99, 0x22, 0x04, 0x32, 0x63,
+ 0x14, 0x68, 0x41, 0x79, 0x09, 0x57, 0x87, 0x29, 0x26, 0x94, 0x05, 0x71,
+ 0x82, 0x41, 0x26, 0x98, 0x68, 0x18, 0x55, 0x42, 0x78, 0x05, 0x74, 0x17,
+ 0x34, 0x34, 0x07, 0x62, 0x94, 0x72, 0x21, 0x08, 0x54, 0x72, 0x21, 0x08,
+ 0x31, 0x53, 0x82, 0x35, 0x27, 0x40, 0x85, 0x77, 0x08, 0x52, 0x58, 0x48,
+ 0x03, 0x86, 0x65, 0x51, 0x96, 0x43, 0x89, 0x19, 0x15, 0x08, 0x49, 0x62,
+ 0x57, 0x46, 0x17, 0x68, 0x56, 0x04, 0x70, 0x63, 0x75, 0x88, 0x13, 0x27,
+ 0x87, 0x44, 0x46, 0x27, 0x02, 0x97, 0x71, 0x07, 0x40, 0x17, 0x24, 0x61,
+ 0x16, 0x94, 0x86, 0x85, 0x67, 0x58, 0x87, 0x92, 0x02, 0x84, 0x75, 0x19,
+ 0x43, 0x60, 0x68, 0x03, 0x54, 0x75, 0x33, 0x17, 0x97, 0x75, 0x12, 0x62,
+ 0x43, 0x08, 0x35, 0x75, 0x32, 0x21, 0x08, 0x82, 0x78, 0x04, 0x74, 0x09,
+ 0x13, 0x48, 0x63, 0x68, 0x67, 0x09, 0x08, 0x50, 0x11, 0x71, 0x64, 0x72,
+ 0x63, 0x76, 0x21, 0x62, 0x80, 0x57, 0x19, 0x15, 0x26, 0x88, 0x02, 0x26,
+ 0x83, 0x17, 0x61, 0x76, 0x28, 0x10, 0x22, 0x37, 0x56, 0x71, 0x51, 0x60,
+ 0x12, 0x79, 0x24, 0x83, 0x78, 0x47, 0x78, 0x20, 0x52, 0x27, 0x19, 0x88,
+ 0x81, 0x04, 0x70, 0x20, 0x25, 0x10, 0x04, 0x01, 0x72, 0x57, 0x30, 0x93,
+ 0x96, 0x23, 0x02, 0x94, 0x61, 0x44, 0x17, 0x65, 0x77, 0x60, 0x27, 0x43,
+ 0x24, 0x59, 0x46, 0x76, 0x00, 0x11, 0x31, 0x99, 0x41, 0x48, 0x75, 0x32,
+ 0x05, 0x15, 0x45, 0x31, 0x57, 0x89, 0x10, 0x47, 0x53, 0x14, 0x66, 0x54,
+ 0x60, 0x55, 0x36, 0x93, 0x30, 0x03, 0x63, 0x80, 0x65, 0x43, 0x17, 0x36,
+ 0x18, 0x64, 0x21, 0x38, 0x16, 0x19, 0x19, 0x51, 0x73, 0x80, 0x38, 0x27,
+ 0x30, 0x89, 0x13, 0x43, 0x54, 0x11, 0x78, 0x05, 0x24, 0x38, 0x83, 0x56,
+ 0x50, 0x59, 0x12, 0x47, 0x69, 0x70, 0x70, 0x91, 0x28, 0x02, 0x08, 0x91,
+ 0x66, 0x09, 0x31, 0x65, 0x46, 0x20, 0x04, 0x85, 0x89, 0x53, 0x91, 0x42,
+ 0x34, 0x09, 0x36, 0x92, 0x42, 0x06, 0x87, 0x88, 0x23, 0x54, 0x87, 0x85,
+ 0x52, 0x98, 0x95, 0x76, 0x13, 0x50, 0x59, 0x89, 0x18, 0x14, 0x17, 0x47,
+ 0x10, 0x97, 0x39, 0x14, 0x33, 0x79, 0x83, 0x62, 0x55, 0x18, 0x30, 0x83,
+ 0x03, 0x45, 0x38, 0x37, 0x35, 0x20, 0x94, 0x84, 0x89, 0x80, 0x89, 0x10,
+ 0x48, 0x77, 0x33, 0x36, 0x50, 0x07, 0x93, 0x02, 0x45, 0x42, 0x91, 0x12,
+ 0x98, 0x09, 0x77, 0x20, 0x31, 0x95, 0x10, 0x29, 0x89, 0x02, 0x38, 0x92,
+ 0x90, 0x19, 0x51, 0x10, 0x19, 0x82, 0x23, 0x68, 0x06, 0x00, 0x67, 0x50,
+ 0x25, 0x03, 0x41, 0x69, 0x53, 0x42, 0x23, 0x99, 0x29, 0x21, 0x63, 0x22,
+ 0x72, 0x54, 0x72, 0x40, 0x23, 0x39, 0x74, 0x92, 0x53, 0x28, 0x67, 0x56,
+ 0x46, 0x84, 0x59, 0x85, 0x10, 0x92, 0x31, 0x20, 0x39, 0x95, 0x65, 0x15,
+ 0x76, 0x35, 0x37, 0x21, 0x98, 0x41, 0x68, 0x74, 0x94, 0x94, 0x86, 0x90,
+ 0x35, 0x07, 0x06, 0x38, 0x78, 0x32, 0x00, 0x60, 0x86, 0x12, 0x34, 0x65,
+ 0x67, 0x35, 0x76, 0x94, 0x78, 0x22, 0x99, 0x42, 0x82, 0x40, 0x05, 0x74,
+ 0x18, 0x59, 0x03, 0x83, 0x89, 0x05, 0x19, 0x28, 0x88, 0x35, 0x59, 0x10,
+ 0x12, 0x96, 0x48, 0x67, 0x59, 0x87, 0x26, 0x85, 0x74, 0x64, 0x78, 0x56,
+ 0x91, 0x81, 0x45, 0x90, 0x21, 0x80, 0x32, 0x19, 0x61, 0x38, 0x61, 0x70,
+ 0x35, 0x08, 0x93, 0x53, 0x21, 0x95, 0x08, 0x27, 0x90, 0x28, 0x94, 0x27,
+ 0x35, 0x78, 0x03, 0x57, 0x74, 0x84, 0x73, 0x63, 0x27, 0x98, 0x14, 0x21,
+ 0x22, 0x36, 0x75, 0x31, 0x81, 0x65, 0x85, 0x51, 0x02, 0x45, 0x18, 0x06,
+ 0x39, 0x13, 0x29, 0x29, 0x73, 0x26, 0x99, 0x51, 0x38, 0x43, 0x35, 0x58,
+ 0x70, 0x92, 0x32, 0x13, 0x80, 0x16, 0x26, 0x44, 0x22, 0x28, 0x05, 0x45,
+ 0x86, 0x90, 0x38, 0x19, 0x40, 0x06, 0x30, 0x56, 0x94, 0x09, 0x02, 0x02,
+ 0x96, 0x29, 0x22, 0x44, 0x87, 0x38, 0x09, 0x95, 0x58, 0x46, 0x42, 0x78,
+ 0x72, 0x77, 0x86, 0x31, 0x97, 0x19, 0x86, 0x51, 0x73, 0x76, 0x63, 0x98,
+ 0x39, 0x40, 0x20, 0x20, 0x67, 0x42, 0x55, 0x50, 0x63, 0x76, 0x81, 0x87,
+ 0x13, 0x81, 0x19, 0x54, 0x11, 0x77, 0x90, 0x26, 0x47, 0x25, 0x92, 0x88,
+ 0x18, 0x56, 0x23, 0x73, 0x91, 0x52, 0x39, 0x08, 0x59, 0x51, 0x81, 0x57,
+ 0x78, 0x17, 0x13, 0x90, 0x90, 0x50, 0x65, 0x59, 0x99, 0x77, 0x42, 0x28,
+ 0x21, 0x59, 0x97, 0x64, 0x25, 0x17, 0x92, 0x24, 0x50, 0x00, 0x28, 0x40,
+ 0x85, 0x33, 0x78, 0x86, 0x79, 0x40, 0x28, 0x30, 0x14, 0x12, 0x01, 0x72,
+ 0x41, 0x43, 0x06, 0x87, 0x67, 0x31, 0x66, 0x77, 0x07, 0x50, 0x55, 0x50,
+ 0x22, 0x80, 0x42, 0x06, 0x38, 0x01, 0x63, 0x66, 0x70, 0x12, 0x52, 0x91,
+ 0x90, 0x97, 0x21, 0x28, 0x22, 0x65, 0x02, 0x80, 0x72, 0x31, 0x17, 0x76,
+ 0x35, 0x16, 0x03, 0x56, 0x59, 0x93, 0x36, 0x37, 0x67, 0x54, 0x46, 0x87,
+ 0x29, 0x01, 0x30, 0x80, 0x47, 0x47, 0x31, 0x98, 0x34, 0x30, 0x23, 0x86,
+ 0x86, 0x14, 0x05, 0x75, 0x09, 0x88, 0x77, 0x92, 0x59, 0x43, 0x98, 0x72,
+ 0x55, 0x54, 0x25, 0x59, 0x22, 0x27, 0x21, 0x62, 0x97, 0x10, 0x61, 0x73,
+ 0x86, 0x95, 0x99, 0x10, 0x62, 0x35, 0x25, 0x16, 0x62, 0x60, 0x51, 0x48,
+ 0x69, 0x69, 0x92, 0x27, 0x19, 0x43, 0x40, 0x52, 0x70, 0x23, 0x37, 0x28,
+ 0x73, 0x10, 0x32, 0x55, 0x85, 0x46, 0x97, 0x59, 0x88, 0x48, 0x54, 0x06,
+ 0x58, 0x04, 0x82, 0x98, 0x88, 0x34, 0x05, 0x41, 0x94, 0x44, 0x35, 0x10,
+ 0x96, 0x48, 0x21, 0x17, 0x24, 0x40, 0x26, 0x15, 0x49, 0x28, 0x12, 0x17,
+ 0x10, 0x17, 0x91, 0x42, 0x84, 0x15, 0x83, 0x36, 0x29, 0x49, 0x92, 0x77,
+ 0x74, 0x11, 0x72, 0x97, 0x64, 0x53, 0x23, 0x29, 0x16, 0x35, 0x22, 0x10,
+ 0x87, 0x07, 0x44, 0x78, 0x18, 0x19, 0x79, 0x03, 0x58, 0x24, 0x15, 0x63,
+ 0x55, 0x75, 0x56, 0x14, 0x63, 0x65, 0x86, 0x61, 0x92, 0x94, 0x30, 0x92,
+ 0x69, 0x78, 0x40, 0x95, 0x19, 0x81, 0x41, 0x66, 0x97, 0x00, 0x17, 0x37,
+ 0x20, 0x82, 0x14, 0x26, 0x42, 0x63, 0x84, 0x20, 0x96, 0x11, 0x68, 0x37,
+ 0x60, 0x28, 0x69, 0x85, 0x45, 0x04, 0x62, 0x20, 0x49, 0x39, 0x74, 0x84,
+ 0x60, 0x23, 0x38, 0x33, 0x42, 0x49, 0x38, 0x82, 0x30, 0x63, 0x21, 0x51,
+ 0x69, 0x09, 0x05, 0x55, 0x78, 0x90, 0x68, 0x69, 0x22, 0x20, 0x17, 0x26,
+ 0x54, 0x01, 0x10, 0x04, 0x68, 0x19, 0x88, 0x40, 0x91, 0x74, 0x81, 0x29,
+ 0x07, 0x45, 0x33, 0x77, 0x12, 0x47, 0x08, 0x60, 0x09, 0x42, 0x84, 0x15,
+ 0x63, 0x92, 0x64, 0x77, 0x07, 0x44, 0x11, 0x07, 0x79, 0x81, 0x24, 0x05,
+ 0x21, 0x60, 0x81, 0x70, 0x66, 0x36, 0x69, 0x68, 0x45, 0x01, 0x11, 0x95,
+ 0x67, 0x95, 0x55, 0x07, 0x96, 0x63, 0x84, 0x04, 0x74, 0x72, 0x61, 0x91,
+ 0x60, 0x09, 0x90, 0x14, 0x34, 0x94, 0x06, 0x12, 0x01, 0x94, 0x40, 0x14,
+ 0x12, 0x53, 0x64, 0x81, 0x75, 0x99, 0x36, 0x99, 0x11, 0x69, 0x95, 0x51,
+ 0x71, 0x55, 0x24, 0x60, 0x52, 0x58, 0x88, 0x11, 0x88, 0x25, 0x37, 0x86,
+ 0x66, 0x36, 0x69, 0x68, 0x45, 0x01, 0x11, 0x95
+};
+
+/* cipher text */
+uint8_t ciphertext[2048] = {
+ 0xE2, 0x19, 0x24, 0x56, 0x13, 0x59, 0xA6, 0x5D, 0xDF, 0xD0, 0x72, 0xAA,
+ 0x23, 0xC7, 0x36, 0x3A, 0xBB, 0x3E, 0x8B, 0x64, 0xD5, 0xBF, 0xDE, 0x65,
+ 0xA2, 0x75, 0xD9, 0x45, 0x6C, 0x3C, 0xD2, 0x6A, 0xC7, 0xD0, 0x9A, 0xD0,
+ 0x87, 0xB8, 0xE4, 0x94, 0x11, 0x62, 0x5A, 0xC3, 0xC3, 0x01, 0xA3, 0x86,
+ 0xBC, 0xBC, 0x9C, 0xC0, 0x81, 0x9F, 0xBF, 0x5C, 0x6F, 0x3F, 0x13, 0xF1,
+ 0xAE, 0xCF, 0x26, 0xB3, 0xBC, 0x49, 0xD6, 0x3B, 0x7A, 0x2E, 0x99, 0x9E,
+ 0x1B, 0x04, 0x50, 0x6C, 0x48, 0x6B, 0x4E, 0x72, 0xFC, 0xC8, 0xA7, 0x0C,
+ 0x2C, 0xD9, 0xED, 0xE4, 0x82, 0xC4, 0x81, 0xA6, 0xB4, 0xCC, 0xAD, 0x10,
+ 0xF3, 0x1C, 0x39, 0x05, 0x41, 0x2D, 0x57, 0x32, 0xE7, 0x16, 0xF8, 0x4D,
+ 0xF0, 0xDE, 0x40, 0x5B, 0x5F, 0x80, 0xDC, 0xA7, 0xC3, 0x2D, 0x3D, 0x9E,
+ 0x27, 0xD4, 0xE8, 0x10, 0x8E, 0xEB, 0xA5, 0x68, 0x6F, 0x3D, 0xC0, 0x44,
+ 0xE7, 0x77, 0x73, 0xB9, 0x92, 0x8E, 0xA2, 0x26, 0x5C, 0x6F, 0x33, 0x4B,
+ 0x0B, 0xEF, 0x37, 0x55, 0xBE, 0xEC, 0x98, 0x83, 0x1E, 0xDF, 0xB2, 0x9E,
+ 0x5D, 0x1D, 0x78, 0x14, 0xD7, 0x85, 0x0E, 0xF8, 0x12, 0x30, 0x8E, 0x5D,
+ 0x08, 0x77, 0x0B, 0x2E, 0x9B, 0xF9, 0xA6, 0x72, 0xD2, 0x41, 0xC1, 0x8E,
+ 0x6B, 0x5E, 0x11, 0x85, 0x22, 0x6E, 0xE4, 0xA3, 0xEA, 0x4C, 0x91, 0xE1,
+ 0x7D, 0xD0, 0xEB, 0x9F, 0xD9, 0xD7, 0x05, 0x77, 0xD9, 0xA1, 0xC2, 0xFD,
+ 0x41, 0x63, 0x51, 0xB4, 0x7A, 0x1F, 0x21, 0xF0, 0xBF, 0x11, 0x4D, 0x9B,
+ 0x97, 0xAB, 0xB4, 0x94, 0x36, 0x34, 0xC9, 0x2D, 0x8B, 0xE2, 0x61, 0xCF,
+ 0xAF, 0x69, 0xD5, 0x5C, 0xE9, 0xED, 0xE3, 0xA0, 0x69, 0xD3, 0xE5, 0xAE,
+ 0x67, 0x6C, 0xC7, 0x11, 0xB1, 0x21, 0x96, 0xD6, 0xDB, 0xA8, 0x1D, 0xC9,
+ 0x83, 0x0B, 0xE2, 0xC6, 0x6E, 0x94, 0xE9, 0x50, 0x12, 0x9B, 0x01, 0x72,
+ 0xAA, 0xFD, 0x8B, 0x7C, 0xEC, 0x0D, 0x01, 0xA4, 0x5D, 0x00, 0xE9, 0x79,
+ 0x58, 0xF5, 0x67, 0xF9, 0x61, 0xC3, 0x11, 0xB4, 0x7E, 0x76, 0x0A, 0x4C,
+ 0x60, 0xD6, 0xBD, 0xC8, 0x31, 0xD3, 0x0C, 0xD0, 0x5B, 0xDF, 0x7B, 0x05,
+ 0x9A, 0xBB, 0xC6, 0x2E, 0x9F, 0xF8, 0x18, 0x80, 0x6D, 0x1B, 0x21, 0xE5,
+ 0xAC, 0x75, 0xBC, 0x0D, 0x72, 0x51, 0x61, 0xD7, 0xEA, 0xA2, 0xAC, 0x0E,
+ 0xC1, 0xE7, 0x49, 0x37, 0xE7, 0x7C, 0xDE, 0xBD, 0x56, 0x00, 0x44, 0x6D,
+ 0xAB, 0x81, 0x2B, 0x26, 0x4A, 0xAA, 0x60, 0xE6, 0x43, 0x8D, 0x88, 0x1C,
+ 0x48, 0x55, 0x53, 0x25, 0xE8, 0x3C, 0x46, 0xF0, 0xA6, 0x33, 0x2D, 0xA2,
+ 0xDC, 0x99, 0x57, 0x38, 0x59, 0xCF, 0x53, 0xFA, 0x3E, 0x78, 0x46, 0xA0,
+ 0xA9, 0x50, 0x12, 0x72, 0xAC, 0x15, 0xC6, 0xA7, 0x42, 0x0F, 0x59, 0x6E,
+ 0xEA, 0xB0, 0x3D, 0xB8, 0x94, 0x32, 0xD1, 0xB6, 0xE8, 0x90, 0x06, 0x66,
+ 0x0C, 0xDE, 0xA9, 0x35, 0xC7, 0xDD, 0x72, 0x42, 0x38, 0x33, 0x32, 0x2F,
+ 0x2C, 0x3F, 0xBD, 0x01, 0xD6, 0x47, 0xFC, 0x89, 0x31, 0x38, 0x2E, 0xB9,
+ 0x6B, 0xED, 0xDB, 0x85, 0x38, 0xB1, 0xA5, 0x50, 0xFA, 0xFB, 0xA7, 0x31,
+ 0xEC, 0xB6, 0xBB, 0x82, 0x50, 0xB4, 0x88, 0x5C, 0xED, 0xE5, 0x4B, 0x5B,
+ 0xBF, 0xB3, 0x18, 0xFB, 0xAD, 0x24, 0x41, 0x55, 0x80, 0xCD, 0xA3, 0xA1,
+ 0xD6, 0xD5, 0xB6, 0x06, 0xE9, 0x85, 0x12, 0x33, 0x52, 0x56, 0xF1, 0xB7,
+ 0xDC, 0x57, 0x9E, 0xB4, 0x00, 0x1E, 0xCB, 0x62, 0x13, 0x4C, 0x90, 0x9A,
+ 0x9D, 0x64, 0x80, 0xD1, 0x5E, 0xB3, 0xCB, 0x8A, 0x73, 0x4E, 0x7B, 0xBE,
+ 0x4D, 0xA7, 0xF7, 0xB7, 0x9C, 0x1C, 0x7F, 0x27, 0x1E, 0x7F, 0x58, 0xB2,
+ 0x74, 0xAF, 0x94, 0x0E, 0x19, 0x23, 0xE1, 0x6B, 0xD8, 0x20, 0x4F, 0x2C,
+ 0x13, 0xE8, 0x8C, 0x37, 0x46, 0x27, 0x55, 0x68, 0xDA, 0x3F, 0x7A, 0xC6,
+ 0xEF, 0x87, 0x1D, 0x3B, 0x95, 0x43, 0x5E, 0x75, 0xE0, 0x02, 0x22, 0x0E,
+ 0x11, 0x60, 0xAB, 0x1A, 0x91, 0x94, 0xC4, 0xFA, 0xD9, 0x92, 0x2B, 0xE5,
+ 0x03, 0xE0, 0x7A, 0x17, 0x5C, 0x67, 0x22, 0xB3, 0xCB, 0x77, 0x9E, 0x22,
+ 0x01, 0x5F, 0x5D, 0x64, 0xE4, 0x2F, 0xC4, 0x61, 0xCA, 0xC7, 0xFD, 0x20,
+ 0x24, 0x30, 0xAB, 0x3F, 0x1A, 0x08, 0x85, 0x08, 0x39, 0xDE, 0x19, 0x1C,
+ 0x1A, 0xEA, 0xB8, 0x7E, 0xE5, 0xBC, 0xD9, 0xB2, 0x59, 0xC8, 0x81, 0x02,
+ 0x1D, 0x5C, 0xC0, 0xDD, 0x8D, 0x56, 0xB6, 0x2E, 0x85, 0x26, 0xA8, 0x34,
+ 0x92, 0x36, 0x9A, 0x84, 0xBD, 0x27, 0xC1, 0x9D, 0x5E, 0x14, 0xC4, 0xB7,
+ 0x02, 0xA8, 0xC9, 0xC2, 0xAD, 0xDC, 0x98, 0x42, 0x51, 0xDE, 0x94, 0x28,
+ 0x39, 0xEF, 0xE9, 0x7F, 0x05, 0x3F, 0x1D, 0x67, 0x72, 0x04, 0xCF, 0x7D,
+ 0x38, 0x49, 0xC4, 0x59, 0xA5, 0xF6, 0xB6, 0x02, 0x31, 0xD0, 0x05, 0x74,
+ 0x4B, 0xD0, 0x89, 0xD1, 0x7F, 0xC6, 0xDB, 0x7E, 0x75, 0x62, 0xA3, 0xC2,
+ 0x2E, 0xB0, 0xCC, 0x9A, 0xD3, 0xA4, 0x14, 0xB6, 0xF2, 0x91, 0x44, 0x3F,
+ 0x84, 0xE0, 0x90, 0x4A, 0x6A, 0x34, 0x8C, 0x35, 0x3C, 0xB2, 0xA9, 0x35,
+ 0x88, 0xB0, 0x88, 0xF8, 0x7E, 0x5C, 0xD2, 0x08, 0x5E, 0x08, 0x15, 0x03,
+ 0xBC, 0xF5, 0x42, 0x6B, 0x28, 0xED, 0xDD, 0xAA, 0x4D, 0x78, 0x10, 0x31,
+ 0x32, 0xA2, 0xC5, 0xCA, 0xEE, 0x9A, 0x62, 0x52, 0x3E, 0x48, 0x83, 0xA4,
+ 0xCA, 0xD4, 0xC7, 0xA7, 0xA5, 0x3F, 0x44, 0x1C, 0x86, 0xAD, 0x52, 0x7D,
+ 0x80, 0x1D, 0x9E, 0x32, 0x3F, 0x2A, 0x2E, 0xD8, 0x89, 0xC1, 0xA4, 0xD6,
+ 0xC1, 0x90, 0x2E, 0x1A, 0x20, 0x4B, 0x87, 0x32, 0x35, 0x25, 0xD8, 0xB8,
+ 0x57, 0x15, 0x85, 0x1E, 0x3C, 0x8A, 0xDC, 0x1A, 0x49, 0x3D, 0x70, 0x35,
+ 0x99, 0xAA, 0xDE, 0x2C, 0xD4, 0xAF, 0x79, 0x72, 0xAB, 0x97, 0x84, 0x20,
+ 0xB6, 0x4F, 0x34, 0x3F, 0xEA, 0xAE, 0x5F, 0x8F, 0x3A, 0x42, 0xDB, 0x68,
+ 0xE5, 0x84, 0x63, 0x2E, 0x7A, 0x0E, 0xBD, 0x28, 0x6A, 0x24, 0xB6, 0xAB,
+ 0xE4, 0xAC, 0x20, 0x7C, 0x81, 0xD0, 0x69, 0x89, 0xF8, 0xDE, 0xA9, 0x02,
+ 0xFD, 0x1F, 0x08, 0xDA, 0x26, 0xC2, 0x24, 0xCA, 0xEB, 0x44, 0x16, 0x8D,
+ 0x55, 0x5F, 0xB9, 0xA9, 0x5A, 0x18, 0x50, 0xB1, 0x54, 0xF1, 0xBF, 0x06,
+ 0xC2, 0xB0, 0x95, 0xC2, 0xAE, 0xE5, 0xBF, 0xB3, 0xFD, 0xC9, 0xBF, 0x75,
+ 0x42, 0x7D, 0xA0, 0xA8, 0x95, 0xF9, 0x62, 0x3B, 0x9C, 0x0D, 0x81, 0xF3,
+ 0x9C, 0xFC, 0x19, 0x5B, 0xF7, 0xD1, 0x9C, 0xF0, 0xAA, 0xFE, 0xEF, 0x35,
+ 0x1E, 0x81, 0x9E, 0x02, 0x46, 0x52, 0x9B, 0x99, 0x0D, 0x12, 0x8B, 0x71,
+ 0x6C, 0x32, 0xB5, 0x23, 0x17, 0x03, 0xC5, 0xB0, 0xA1, 0xC3, 0x4B, 0x10,
+ 0x01, 0x4D, 0x4C, 0x4A, 0x46, 0x8F, 0xD9, 0x79, 0xBB, 0x10, 0x44, 0xB0,
+ 0x3C, 0x7D, 0x46, 0xFD, 0x38, 0xDF, 0xAF, 0x6E, 0x58, 0x7D, 0xE1, 0xEB,
+ 0xBB, 0x8C, 0xDC, 0x79, 0xDA, 0x41, 0xD1, 0x8B, 0x0B, 0x11, 0x4F, 0xE5,
+ 0x1C, 0xC1, 0x59, 0xA7, 0x1E, 0x5A, 0xC1, 0xEE, 0x27, 0x33, 0xC8, 0x55,
+ 0xA9, 0x32, 0xEA, 0xF7, 0x45, 0xB0, 0x08, 0xE9, 0x32, 0xDF, 0x70, 0x24,
+ 0x82, 0xD3, 0x2A, 0x3E, 0x4F, 0x42, 0xB9, 0x25, 0x10, 0xD1, 0x73, 0xFA,
+ 0xFD, 0xC1, 0x84, 0xF2, 0xF7, 0x0E, 0xBC, 0x9D, 0x90, 0x39, 0xD7, 0xFD,
+ 0x45, 0x77, 0xBA, 0x29, 0xF9, 0x87, 0x45, 0xC1, 0x32, 0x44, 0xB0, 0x27,
+ 0x6B, 0xFC, 0x8A, 0xFE, 0x00, 0x6F, 0x61, 0x98, 0xD0, 0x60, 0xC8, 0x10,
+ 0xE5, 0xBC, 0x88, 0x13, 0x45, 0x44, 0xA5, 0xEB, 0x6E, 0xCB, 0x11, 0xAF,
+ 0x30, 0xDC, 0x8B, 0xF8, 0x30, 0x46, 0xDA, 0x76, 0xF1, 0xE5, 0x14, 0x51,
+ 0x8A, 0x02, 0x5A, 0x5A, 0xAA, 0x7B, 0x2D, 0x57, 0x0A, 0x5C, 0x73, 0xD1,
+ 0x88, 0xCE, 0xBE, 0x3D, 0x06, 0x3F, 0x48, 0x1D, 0x44, 0x24, 0x6F, 0x4F,
+ 0x7F, 0x6A, 0xF2, 0x16, 0x34, 0x35, 0x38, 0x73, 0x8A, 0xE5, 0x25, 0xF4,
+ 0x34, 0x9E, 0x5B, 0x40, 0x90, 0x04, 0x57, 0x1B, 0x57, 0x75, 0x8F, 0xEA,
+ 0x1C, 0xF8, 0x7A, 0x68, 0x01, 0x1C, 0x8D, 0xBA, 0xF4, 0xE3, 0xD3, 0x8F,
+ 0x7F, 0xE4, 0x50, 0x35, 0x6B, 0x6B, 0xF6, 0xFC, 0x5F, 0x9B, 0x98, 0x78,
+ 0x16, 0x68, 0x72, 0x74, 0x71, 0x78, 0x25, 0x68, 0xE5, 0x1E, 0x66, 0xE2,
+ 0x4E, 0xC8, 0xDB, 0x92, 0x8E, 0x88, 0x64, 0x74, 0xDE, 0xDB, 0x85, 0x56,
+ 0x9F, 0xF9, 0xC4, 0x29, 0x54, 0xA8, 0xFB, 0xBA, 0xEA, 0xAB, 0xC7, 0x49,
+ 0x5C, 0x6C, 0xD7, 0x61, 0x8C, 0xE2, 0x2B, 0xF5, 0xA0, 0xA8, 0xD2, 0x41,
+ 0xC0, 0x54, 0xAB, 0xA7, 0x56, 0x5C, 0xE7, 0xA5, 0xEA, 0xBC, 0x47, 0xD1,
+ 0x0D, 0xD9, 0xC0, 0xA9, 0xC4, 0xA7, 0x3E, 0xD1, 0x2B, 0x1E, 0x34, 0x31,
+ 0x36, 0x9D, 0xB9, 0x51, 0xD3, 0xAD, 0x29, 0xE6, 0x9B, 0xD8, 0x4B, 0x93,
+ 0x33, 0x2F, 0x30, 0xEF, 0x18, 0x90, 0x69, 0x11, 0x09, 0xEA, 0xBA, 0xE0,
+ 0x10, 0x93, 0x63, 0x71, 0xA8, 0x83, 0x59, 0xDB, 0xFC, 0x12, 0x22, 0x84,
+ 0xC7, 0x01, 0x20, 0x99, 0xEC, 0x59, 0xA9, 0xE6, 0x9B, 0x5B, 0x8B, 0xB8,
+ 0x68, 0x52, 0x61, 0x8B, 0x4E, 0xF3, 0x50, 0x69, 0xF1, 0x49, 0x9B, 0xAF,
+ 0x53, 0xAD, 0xA0, 0x9D, 0x23, 0xE0, 0xE0, 0xC4, 0x31, 0xE4, 0x8E, 0x1C,
+ 0x51, 0x14, 0xFC, 0x95, 0x9C, 0xA6, 0x34, 0x85, 0xB0, 0x36, 0xFC, 0x7A,
+ 0x53, 0x03, 0x31, 0x0E, 0xCB, 0x34, 0x3E, 0xDF, 0xD1, 0x71, 0xBC, 0xDB,
+ 0xA1, 0xAF, 0x59, 0x4A, 0x03, 0x19, 0xA7, 0x8E, 0xB5, 0x82, 0x15, 0x24,
+ 0x69, 0x68, 0xBD, 0x9C, 0x2E, 0xFA, 0x06, 0xB5, 0x70, 0xC5, 0x70, 0xC4,
+ 0x14, 0x99, 0x01, 0x49, 0xBD, 0x6E, 0xAE, 0x10, 0xA1, 0xE4, 0xEF, 0xDD,
+ 0xE5, 0x51, 0x22, 0x9D, 0xF7, 0x93, 0xAB, 0x41, 0xBD, 0x86, 0x7A, 0xCC,
+ 0x51, 0x94, 0xEC, 0x22, 0xBE, 0x0D, 0x67, 0xFD, 0xA3, 0xFD, 0xCF, 0xF8,
+ 0x74, 0x0A, 0x5E, 0x1C, 0x71, 0xAD, 0xB6, 0xD0, 0xD7, 0xF8, 0x71, 0x34,
+ 0xAB, 0x62, 0xE7, 0xA8, 0x6B, 0x8F, 0x1E, 0x43, 0x46, 0xA5, 0xE4, 0xB4,
+ 0x52, 0x81, 0x66, 0xB3, 0xE5, 0x10, 0x23, 0x21, 0x2B, 0x31, 0x0F, 0xB8,
+ 0xB6, 0xC5, 0xA5, 0xC9, 0x90, 0x07, 0x83, 0xD0, 0xC3, 0x10, 0x7A, 0x04,
+ 0xBD, 0x8A, 0x3C, 0x7B, 0xF9, 0x0E, 0x51, 0x81, 0x96, 0xC8, 0xAE, 0xF9,
+ 0x27, 0xDE, 0x62, 0x7A, 0x41, 0x60, 0x35, 0x8F, 0x77, 0xBC, 0x95, 0x11,
+ 0x2C, 0xC4, 0x6C, 0x47, 0x7A, 0xEB, 0x29, 0xE5, 0x8E, 0xB5, 0xD6, 0xA5,
+ 0x54, 0x1B, 0xD0, 0xE0, 0x0F, 0x7D, 0x5C, 0x51, 0xD8, 0x6C, 0x92, 0x2F,
+ 0x13, 0x4E, 0x90, 0x77, 0xF8, 0x8D, 0x69, 0x78, 0x96, 0x96, 0x49, 0x9F,
+ 0x3C, 0x2E, 0x5C, 0xA6, 0x73, 0x27, 0x7D, 0xAD, 0x8D, 0xE3, 0x9B, 0x4A,
+ 0x2F, 0x50, 0x0A, 0x42, 0x7E, 0xF2, 0x3B, 0x50, 0x5C, 0x81, 0xC9, 0x49,
+ 0x01, 0x96, 0x83, 0x0A, 0xEC, 0x7F, 0xED, 0x1C, 0xA5, 0x7D, 0xF1, 0xE6,
+ 0xC4, 0xB3, 0x8F, 0xF9, 0x0F, 0xDB, 0x7B, 0xC1, 0x35, 0xF7, 0x63, 0x4A,
+ 0x39, 0xD4, 0x0E, 0x9E, 0x05, 0xD9, 0x42, 0xAA, 0xAB, 0x52, 0xCA, 0x4E,
+ 0x98, 0x3B, 0x43, 0x1A, 0x91, 0x25, 0xA9, 0x34, 0xD5, 0x66, 0xB2, 0xF4,
+ 0xFF, 0xDE, 0x64, 0x91, 0x90, 0xB9, 0x17, 0x70, 0xA0, 0xD6, 0xEA, 0xB6,
+ 0x36, 0xF4, 0x44, 0xCE, 0x86, 0x7B, 0x18, 0x74, 0x9C, 0x18, 0xAD, 0xB6,
+ 0xE0, 0x74, 0xC1, 0x0E, 0x29, 0x5D, 0x6A, 0x36, 0xD1, 0x3E, 0xB8, 0x2A,
+ 0xE4, 0x23, 0x1D, 0xB2, 0xAE, 0xF5, 0x5B, 0x8E, 0x2C, 0xD9, 0xD1, 0xE1,
+ 0x4F, 0x58, 0xA6, 0xE3, 0x88, 0x2E, 0xF9, 0xCF, 0x32, 0x3E, 0x8E, 0x37,
+ 0x95, 0xFF, 0xAD, 0x68, 0x11, 0x5E, 0x7F, 0x3D, 0x38, 0x06, 0x7C, 0x33,
+ 0x32, 0x78, 0x09, 0xEC, 0xCA, 0x3E, 0x08, 0xF1, 0xD0, 0x95, 0x19, 0xC9,
+ 0x7E, 0x62, 0xB2, 0x02, 0xA3, 0x5D, 0xF8, 0x3F, 0xA2, 0xB0, 0x8B, 0x38,
+ 0xB1, 0x8C, 0xEA, 0xB3, 0xE4, 0xBF, 0xD3, 0x6C, 0x6D, 0x3D, 0xD1, 0xC6,
+ 0xDA, 0x6B, 0x7A, 0xBA, 0x05, 0xEA, 0x9E, 0xA5, 0xE9, 0x00, 0xCC, 0x80,
+ 0x57, 0xAB, 0xD9, 0x0A, 0xD1, 0x00, 0x82, 0x2A, 0x51, 0x4B, 0xA2, 0x96,
+ 0xEB, 0x96, 0x14, 0xA8, 0x46, 0xDF, 0x1D, 0x48, 0xAE, 0xFA, 0x12, 0xA8,
+ 0x89, 0x8E, 0xEF, 0xBC, 0x3C, 0xA1, 0x6E, 0xDD, 0x90, 0x66, 0x2E, 0x56,
+ 0x6B, 0xF7, 0x1D, 0xF0, 0x46, 0x11, 0x4A, 0xA6, 0x07, 0x73, 0xC4, 0xE3,
+ 0x97, 0xFE, 0x7E, 0x22, 0x6F, 0x22, 0xB4, 0x6F, 0xB0, 0x32, 0x0A, 0x5E,
+ 0x85, 0x7E, 0x54, 0xB4, 0x24, 0xBD, 0x36, 0xA7, 0x94, 0xE7, 0x37, 0xFD,
+ 0x1A, 0xAF, 0xF4, 0x44, 0xB4, 0x35, 0x4F, 0xE0, 0x41, 0x0E, 0x7D, 0x73,
+ 0x29, 0x28, 0xDA, 0xAF, 0x69, 0xB2, 0xC5, 0xA7, 0x2A, 0x0A, 0xB5, 0x9C,
+ 0xC2, 0xAC, 0x5F, 0x59, 0x5C, 0xEE, 0x44, 0x49, 0x6F, 0x4F, 0x64, 0x43,
+ 0x6F, 0x43, 0x44, 0xAA, 0xA0, 0x4E, 0x94, 0x7C, 0x26, 0x5A, 0xF1, 0xD9,
+ 0xE6, 0x09, 0x80, 0x7A, 0x7D, 0x2E, 0xA2, 0xB9, 0x1A, 0x7A, 0x8F, 0x2A,
+ 0x97, 0x77, 0x23, 0xB4, 0x10, 0xAD, 0x20, 0x7B, 0xA3, 0x0F, 0xFD, 0x44,
+ 0x38, 0xAD, 0x94, 0x39, 0x88, 0x1C, 0xC4, 0xC8, 0xDF, 0xF1, 0x04, 0xA6,
+ 0x51, 0x5D, 0x54, 0x53, 0x60, 0xE4, 0x8A, 0x89, 0x4A, 0x9C, 0xE1, 0x68,
+ 0x4D, 0xFE, 0x69, 0x94, 0x0B, 0x8E, 0xED, 0x6C, 0xFE, 0x11, 0xA7, 0x77,
+ 0xBF, 0x08, 0x41, 0x67, 0x22, 0x59, 0x51, 0x48, 0xEE, 0x59, 0x02, 0x0E,
+ 0x60, 0x6D, 0xAE, 0x8C, 0xC6, 0x39, 0xB7, 0x55, 0xC5, 0x3B, 0x87, 0xA9,
+ 0xBD, 0xD8, 0xEA, 0x48, 0x21, 0xE4, 0x57, 0x51, 0x56, 0x03, 0xF4, 0xBE,
+ 0xBD, 0xBD, 0xC5, 0x26, 0x9B, 0x27, 0xE3, 0xAE, 0xD5, 0x1E, 0x30, 0xE9,
+ 0x7C, 0x9D, 0xDB, 0xE1, 0x09, 0x9D, 0x82, 0x49, 0x15, 0x38, 0x69, 0xFC,
+ 0x1D, 0x52, 0x1A, 0x75, 0xE6, 0xDD, 0x1D, 0xBE, 0x06, 0xC4, 0x9F, 0x14,
+ 0x4C, 0x12, 0xDE, 0xDF, 0x4A, 0xE1, 0x3B, 0xE7, 0xD1, 0xE3, 0x71, 0xD1,
+ 0xFA, 0xD8, 0x0E, 0x63, 0x27, 0xA9, 0xC7, 0x9D, 0xC0, 0x01, 0xC2, 0xDD,
+ 0xFC, 0xA6, 0x1F, 0x59, 0x87, 0xC5, 0x56, 0x99, 0x80, 0xEB, 0xF0, 0xB8,
+ 0xB3, 0x00, 0x9A, 0x61, 0xDB, 0x50, 0x79, 0x48, 0x37, 0x35, 0xDA, 0xD8,
+ 0xF2, 0x37, 0xA7, 0x43, 0xA7, 0xEB, 0x88, 0x2C, 0x68, 0xB4, 0xBB, 0x14,
+ 0x45, 0x31, 0x6B, 0x87, 0x65, 0xE7, 0x82, 0xB4, 0x74, 0xD2, 0xFF, 0x7F,
+ 0x60, 0x15, 0x94, 0x75, 0xEE, 0x30, 0x3C, 0x4E, 0xFC, 0x41, 0xD1, 0x5B,
+ 0xDD, 0x84, 0x6E, 0x13, 0x6C, 0xF8, 0x12, 0xE6, 0xB7, 0xA4, 0xB9, 0xC8,
+ 0x13, 0x89, 0x0C, 0x34, 0xA6, 0xAF, 0x09, 0xEB, 0xF2, 0xB3, 0x79, 0x77,
+ 0x80, 0xD8, 0x77, 0x64, 0xAD, 0x32, 0x3D, 0xD2, 0x06, 0xDF, 0x72, 0x11,
+ 0x4A, 0xA7, 0x70, 0xCE, 0xF9, 0xE6, 0x81, 0x35, 0xA4, 0xA7, 0x52, 0xB5,
+ 0x13, 0x68, 0x5C, 0x69, 0x45, 0xE2, 0x77, 0x2D, 0xBE, 0x2C, 0xE9, 0x38,
+ 0x25, 0x28, 0x7B, 0x63, 0x2C, 0x19, 0x8F, 0x59
+};
+
+/* aad */
+uint8_t aad[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B
+};
+
+/* iv */
+uint8_t iv[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F
+};
+
+/* cipher key */
+uint8_t cipher_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+
+/* auth key */
+uint8_t auth_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
+ 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
+ 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F
+};
+
+/* Digests */
+uint8_t digest[2048] = { 0x00 };
+
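+/*
+ * Build a test vector from the statically defined pattern data above.
+ * The IV, AAD and digest buffers are allocated and filled to match the
+ * sizes requested in the options; plaintext, ciphertext and keys point
+ * directly at the static arrays.
+ */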
+struct cperf_test_vector*
+cperf_test_vector_get_dummy(struct cperf_options *options)
+{
+ struct cperf_test_vector *t_vec;
+
+ t_vec = (struct cperf_test_vector *)rte_zmalloc(NULL,
+ sizeof(struct cperf_test_vector), 0);
+ if (t_vec == NULL)
+ return t_vec;
+
+ t_vec->plaintext.data = plaintext;
+ t_vec->plaintext.length = options->buffer_sz;
+
+ if (options->op_type == CPERF_CIPHER_ONLY ||
+ options->op_type == CPERF_CIPHER_THEN_AUTH ||
+ options->op_type == CPERF_AUTH_THEN_CIPHER ||
+ options->op_type == CPERF_AEAD) {
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+ t_vec->cipher_key.length = -1;
+ t_vec->ciphertext.data = plaintext;
+ t_vec->cipher_key.data = NULL;
+ t_vec->iv.data = NULL;
+ } else {
+ t_vec->cipher_key.length = options->cipher_key_sz;
+ t_vec->ciphertext.data = ciphertext;
+ t_vec->cipher_key.data = cipher_key;
+ t_vec->iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ 16);
+ if (t_vec->iv.data == NULL) {
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
+ }
+ t_vec->ciphertext.length = options->buffer_sz;
+ t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
+ t_vec->iv.length = options->cipher_iv_sz;
+ t_vec->data.cipher_offset = 0;
+ t_vec->data.cipher_length = options->buffer_sz;
+ }
+
+ if (options->op_type == CPERF_AUTH_ONLY ||
+ options->op_type == CPERF_CIPHER_THEN_AUTH ||
+ options->op_type == CPERF_AUTH_THEN_CIPHER ||
+ options->op_type == CPERF_AEAD) {
+ t_vec->auth_key.length = options->auth_key_sz;
+ if (options->auth_algo == RTE_CRYPTO_AUTH_NULL) {
+ t_vec->auth_key.data = NULL;
+ t_vec->aad.data = NULL;
+ } else if (options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM ||
+ options->auth_algo ==
+ RTE_CRYPTO_AUTH_AES_GMAC ||
+ options->auth_algo ==
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+ options->auth_algo ==
+ RTE_CRYPTO_AUTH_KASUMI_F9 ||
+ options->auth_algo ==
+ RTE_CRYPTO_AUTH_ZUC_EIA3) {
+ t_vec->auth_key.data = NULL;
+ t_vec->aad.data = rte_malloc(NULL, options->auth_aad_sz,
+ 16);
+ if (t_vec->aad.data == NULL) {
+ if (options->op_type != CPERF_AUTH_ONLY)
+ rte_free(t_vec->iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->aad.data, aad, options->auth_aad_sz);
+ } else {
+ t_vec->auth_key.data = auth_key;
+ t_vec->aad.data = NULL;
+ }
+
+ t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.length = options->auth_aad_sz;
+ t_vec->digest.data = rte_malloc(NULL, options->auth_digest_sz,
+ 16);
+ if (t_vec->digest.data == NULL) {
+ if (options->op_type != CPERF_AUTH_ONLY)
+ rte_free(t_vec->iv.data);
+ rte_free(t_vec->aad.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ t_vec->digest.phys_addr =
+ rte_malloc_virt2phy(t_vec->digest.data);
+ t_vec->digest.length = options->auth_digest_sz;
+ memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
+ t_vec->data.auth_offset = 0;
+ t_vec->data.auth_length = options->buffer_sz;
+ }
+
+ return t_vec;
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_TEST_VECTORS_
+#define _CPERF_TEST_VECTORS_
+
+#include "cperf_options.h"
+
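+/* Buffers, keys and offsets describing a single symmetric crypto test case */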
+struct cperf_test_vector {
+ struct {
+ uint8_t *data;
+ uint32_t length;
+ } plaintext;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } cipher_key;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } auth_key;
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint8_t *data;
+ uint32_t length;
+ } ciphertext;
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ uint16_t length;
+ } aad;
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ uint16_t length;
+ } digest;
+
+ struct {
+ uint32_t auth_offset;
+ uint32_t auth_length;
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ } data;
+};
+
+struct cperf_test_vector*
+cperf_test_vector_get_dummy(struct cperf_options *options);
+
+extern uint8_t ciphertext[2048];
+
+extern uint8_t cipher_key[];
+extern uint8_t auth_key[];
+
+extern uint8_t iv[];
+extern uint8_t aad[];
+
+extern uint8_t digest[2048];
+
+#endif
--- /dev/null
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+#include "cperf_verify_parser.h"
+
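+/*
+ * Release a test vector. For the dummy vector (no test file) only the
+ * dynamically allocated IV, AAD and digest buffers are freed; vectors
+ * parsed from a file own all of their buffers, so every field is freed.
+ */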
+int
+free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts)
+{
+ if (vector == NULL || opts == NULL)
+ return -1;
+
+ if (opts->test_file == NULL) {
+ if (vector->iv.data)
+ rte_free(vector->iv.data);
+ if (vector->aad.data)
+ rte_free(vector->aad.data);
+ if (vector->digest.data)
+ rte_free(vector->digest.data);
+ rte_free(vector);
+
+ } else {
+ if (vector->plaintext.data)
+ rte_free(vector->plaintext.data);
+ if (vector->cipher_key.data)
+ rte_free(vector->cipher_key.data);
+ if (vector->auth_key.data)
+ rte_free(vector->auth_key.data);
+ if (vector->iv.data)
+ rte_free(vector->iv.data);
+ if (vector->ciphertext.data)
+ rte_free(vector->ciphertext.data);
+ if (vector->aad.data)
+ rte_free(vector->aad.data);
+ if (vector->digest.data)
+ rte_free(vector->digest.data);
+ rte_free(vector);
+ }
+
+ return 0;
+}
+
+/* trim leading and trailing spaces */
+static char *
+trim(char *str)
+{
+ char *start, *end;
+
+ for (start = str; *start; start++) {
+ if (!isspace((unsigned char) start[0]))
+ break;
+ }
+
+ for (end = start + strlen(start); end > start + 1; end--) {
+ if (!isspace((unsigned char) end[-1]))
+ break;
+ }
+
+ *end = 0;
+
+ /* Shift from "start" to the beginning of the string */
+ if (start > str)
+ memmove(str, start, (end - start) + 1);
+
+ return str;
+}
+
+/* tokenizes the test values separated by commas */
+static int
+parse_values(char *tokens, uint8_t **data, uint32_t *data_length)
+{
+ uint8_t n_tokens;
+ uint32_t data_size = 32;
+ uint8_t *values;
+ char *tok, *error = NULL;
+
+ tok = strtok(tokens, VALUE_DELIMITER);
+ if (tok == NULL)
+ return -1;
+
+ values = (uint8_t *) rte_zmalloc(NULL, sizeof(uint8_t) * data_size, 0);
+ if (values == NULL)
+ return -1;
+
+ n_tokens = 0;
+ while (tok != NULL) {
+ uint8_t *values_extended = NULL;
+
+ if (n_tokens >= data_size) {
+
+ data_size *= 2;
+
+ values_extended = (uint8_t *) rte_realloc(values,
+ sizeof(uint8_t) * data_size, 0);
+ if (values_extended == NULL) {
+ rte_free(values);
+ return -1;
+ }
+
+ values = values_extended;
+ }
+
+ values[n_tokens] = (uint8_t) strtoul(tok, &error, 0);
+ if ((error == NULL) || (*error != '\0')) {
+ printf("Failed with convert '%s'\n", tok);
+ rte_free(values);
+ return -1;
+ }
+
+ tok = strtok(NULL, VALUE_DELIMITER);
+ if (tok == NULL)
+ break;
+
+ n_tokens++;
+ }
+
+ uint8_t *resize_values = (uint8_t *) rte_realloc(values,
+ sizeof(uint8_t) * (n_tokens + 1), 0);
+
+ if (resize_values == NULL) {
+ rte_free(values);
+ return -1;
+ }
+
+ *data = resize_values;
+ *data_length = n_tokens + 1;
+
+ return 0;
+}
+
+/* checks the type of key and assigns data */
+static int
+parse_entry(char *entry, struct cperf_test_vector *vector)
+{
+ char *token, *key_token;
+ uint8_t *data = NULL;
+ int status;
+ uint32_t data_length;
+
+ /* get key */
+ token = strtok(entry, ENTRY_DELIMITER);
+ key_token = token;
+
+ /* get values for key */
+ token = strtok(NULL, ENTRY_DELIMITER);
+
+ if (token == NULL) {
+ printf("Expected 'key = values' but was '%.40s'..\n",
+ key_token);
+ return -1;
+ }
+
+ status = parse_values(token, &data, &data_length);
+ if (status)
+ return -1;
+
+ /* compare keys */
+ if (strstr(key_token, "plaintext")) {
+ if (vector->plaintext.data)
+ rte_free(vector->plaintext.data);
+ vector->plaintext.data = data;
+ vector->plaintext.length = data_length;
+ } else if (strstr(key_token, "cipher_key")) {
+ if (vector->cipher_key.data)
+ rte_free(vector->cipher_key.data);
+ vector->cipher_key.data = data;
+ vector->cipher_key.length = data_length;
+ } else if (strstr(key_token, "auth_key")) {
+ if (vector->auth_key.data)
+ rte_free(vector->auth_key.data);
+ vector->auth_key.data = data;
+ vector->auth_key.length = data_length;
+ } else if (strstr(key_token, "iv")) {
+ if (vector->iv.data)
+ rte_free(vector->iv.data);
+ vector->iv.data = data;
+ vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
+ vector->iv.length = data_length;
+ } else if (strstr(key_token, "ciphertext")) {
+ if (vector->ciphertext.data)
+ rte_free(vector->ciphertext.data);
+ vector->ciphertext.data = data;
+ vector->ciphertext.length = data_length;
+ } else if (strstr(key_token, "aad")) {
+ if (vector->aad.data)
+ rte_free(vector->aad.data);
+ vector->aad.data = data;
+ vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+ vector->aad.length = data_length;
+ } else if (strstr(key_token, "digest")) {
+ if (vector->digest.data)
+ rte_free(vector->digest.data);
+ vector->digest.data = data;
+ vector->digest.phys_addr = rte_malloc_virt2phy(
+ vector->digest.data);
+ vector->digest.length = data_length;
+ } else {
+ printf("Not valid key: '%s'\n", trim(key_token));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* searches the file for key = value entries */
+static int
+parse_file(struct cperf_test_vector *v_vec, const char *path)
+{
+ FILE *fp;
+ char *line = NULL, *entry = NULL;
+ ssize_t read;
+ size_t len = 0;
+ int status = 0;
+
+ fp = fopen(path, "r");
+ if (fp == NULL) {
+ printf("File %s does not exists\n", path);
+ return -1;
+ }
+
+ while ((read = getline(&line, &len, fp)) != -1) {
+ /* ignore comments and new lines */
+ if (line[0] == '#' || line[0] == '/' || line[0] == '\n'
+ || line[0] == '\r' || line[0] == ' ')
+ continue;
+
+ trim(line);
+
+ /* buffer for multiline */
+ entry = (char *) rte_realloc(entry,
+ sizeof(char) * strlen(line) + 1, 0);
+ if (entry == NULL)
+ goto err;
+
+ memset(entry, 0, strlen(line) + 1);
+ strncpy(entry, line, strlen(line));
+
+ /* check if entry ends with , or = */
+ if (entry[strlen(entry) - 1] == ','
+ || entry[strlen(entry) - 1] == '=') {
+ while ((read = getline(&line, &len, fp)) != -1) {
+ trim(line);
+
+ /* extend entry about length of new line */
+ char *entry_extended = (char *) rte_realloc(
+ entry, sizeof(char)
+ * (strlen(line) + strlen(entry))
+ + 1, 0);
+
+ if (entry_extended == NULL)
+ goto err;
+ entry = entry_extended;
+
+ strncat(entry, line, strlen(line));
+
+ if (entry[strlen(entry) - 1] != ',')
+ break;
+ }
+ }
+ status = parse_entry(entry, v_vec);
+ if (status) {
+ printf("An error occurred while parsing!\n");
+ goto err;
+ }
+ }
+
+ fclose(fp);
+ free(line);
+ rte_free(entry);
+
+ return 0;
+
+err:
+ if (fp)
+ fclose(fp);
+ if (line)
+ free(line);
+ if (entry)
+ rte_free(entry);
+
+ return -1;
+}
+
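+/*
+ * Allocate a test vector and populate it from the file named in
+ * opts->test_file; cipher and auth offsets/lengths are derived from the
+ * configured buffer size rather than from the file.
+ */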
+struct cperf_test_vector*
+cperf_test_vector_get_from_file(struct cperf_options *opts)
+{
+ int status;
+ struct cperf_test_vector *test_vector = NULL;
+
+ if (opts == NULL || opts->test_file == NULL)
+ return test_vector;
+
+ test_vector = (struct cperf_test_vector *) rte_zmalloc(NULL,
+ sizeof(struct cperf_test_vector), 0);
+ if (test_vector == NULL)
+ return test_vector;
+
+ /* fill the vector with data parsed from the test file */
+ status = parse_file(test_vector, opts->test_file);
+ if (status) {
+ free_test_vector(test_vector, opts);
+ return NULL;
+ }
+
+ /* other values not included in the file */
+ test_vector->data.cipher_offset = 0;
+ test_vector->data.cipher_length = opts->buffer_sz;
+
+ test_vector->data.auth_offset = 0;
+ test_vector->data.auth_length = opts->buffer_sz;
+
+ return test_vector;
+}
--- /dev/null
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_eal.h>
+#include <rte_cryptodev.h>
+
+#include "cperf.h"
+#include "cperf_options.h"
+#include "cperf_test_vector_parsing.h"
+#include "cperf_test_throughput.h"
+#include "cperf_test_latency.h"
+
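+/* Display names for the supported test and operation types */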
+const char *cperf_test_type_strs[] = {
+ [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
+ [CPERF_TEST_TYPE_CYCLECOUNT] = "cycle-count",
+ [CPERF_TEST_TYPE_LATENCY] = "latency"
+};
+
+const char *cperf_op_type_strs[] = {
+ [CPERF_CIPHER_ONLY] = "cipher-only",
+ [CPERF_AUTH_ONLY] = "auth-only",
+ [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
+ [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
+ [CPERF_AEAD] = "aead"
+};
+
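+/*
+ * Constructor/runner/destructor triplet for each test type; the
+ * cycle-count test is not implemented in this version.
+ */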
+const struct cperf_test cperf_testmap[] = {
+ [CPERF_TEST_TYPE_THROUGHPUT] = {
+ cperf_throughput_test_constructor,
+ cperf_throughput_test_runner,
+ cperf_throughput_test_destructor
+ },
+ [CPERF_TEST_TYPE_CYCLECOUNT] = { NULL },
+ [CPERF_TEST_TYPE_LATENCY] = {
+ cperf_latency_test_constructor,
+ cperf_latency_test_runner,
+ cperf_latency_test_destructor
+ }
+};
+
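+/*
+ * Configure and start every crypto device of the requested type with a
+ * single queue pair each. Returns the number of devices started, or a
+ * negative value on error.
+ */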
+static int
+cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
+{
+ uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
+ int ret;
+
+ enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
+ enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
+ if (enabled_cdev_count == 0) {
+ printf("No crypto devices type %s available\n",
+ opts->device_type);
+ return -EINVAL;
+ }
+
+ nb_lcores = rte_lcore_count() - 1;
+
+ if (enabled_cdev_count > nb_lcores) {
+ printf("Number of capable crypto devices (%d) "
+ "has to be less or equal to number of slave "
+ "cores (%d)\n", enabled_cdev_count, nb_lcores);
+ return -EINVAL;
+ }
+
+ for (cdev_id = 0; cdev_id < enabled_cdev_count &&
+ cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {
+
+ struct rte_cryptodev_config conf = {
+ .nb_queue_pairs = 1,
+ .socket_id = SOCKET_ID_ANY,
+ .session_mp = {
+ .nb_objs = 2048,
+ .cache_size = 64
+ }
+ };
+ struct rte_cryptodev_qp_conf qp_conf = {
+ .nb_descriptors = 2048
+ };
+
+ ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
+ if (ret < 0) {
+ printf("Failed to configure cryptodev %u",
+ enabled_cdevs[cdev_id]);
+ return -EINVAL;
+ }
+
+ ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
+ &qp_conf, SOCKET_ID_ANY);
+ if (ret < 0) {
+ printf("Failed to setup queue pair %u on "
+ "cryptodev %u", 0, cdev_id);
+ return -EINVAL;
+ }
+
+ ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
+ if (ret < 0) {
+ printf("Failed to start device %u: error %d\n",
+ enabled_cdevs[cdev_id], ret);
+ return -EPERM;
+ }
+ }
+
+ return enabled_cdev_count;
+}
+
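+/*
+ * Check that every enabled device supports the requested cipher and/or
+ * auth algorithm with the configured key, IV, digest and AAD sizes.
+ */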
+static int
+cperf_verify_devices_capabilities(struct cperf_options *opts,
+ uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
+{
+ struct rte_cryptodev_sym_capability_idx cap_idx;
+ const struct rte_cryptodev_symmetric_capability *capability;
+
+ uint8_t i, cdev_id;
+ int ret;
+
+ for (i = 0; i < nb_cryptodevs; i++) {
+
+ cdev_id = enabled_cdevs[i];
+
+ if (opts->op_type == CPERF_AUTH_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
+
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ cap_idx.algo.auth = opts->auth_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_auth(
+ capability,
+ opts->auth_key_sz,
+ opts->auth_digest_sz,
+ opts->auth_aad_sz);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (opts->op_type == CPERF_CIPHER_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
+
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cap_idx.algo.cipher = opts->cipher_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_cipher(
+ capability,
+ opts->cipher_key_sz,
+ opts->cipher_iv_sz);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
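+/*
+ * Validate that a test vector parsed from a file contains every field
+ * needed for the selected operation type and that the lengths match the
+ * configured buffer, key, IV, digest and AAD sizes.
+ */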
+static int
+cperf_check_test_vector(struct cperf_options opts,
+ struct cperf_test_vector test_vec)
+{
+ if (opts.op_type == CPERF_CIPHER_ONLY) {
+ if (opts.cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec.plaintext.data == NULL)
+ return -1;
+ } else if (opts.cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec.plaintext.data == NULL)
+ return -1;
+ if (test_vec.plaintext.length != opts.buffer_sz)
+ return -1;
+ if (test_vec.ciphertext.data == NULL)
+ return -1;
+ if (test_vec.ciphertext.length != opts.buffer_sz)
+ return -1;
+ if (test_vec.iv.data == NULL)
+ return -1;
+ if (test_vec.iv.length != opts.cipher_iv_sz)
+ return -1;
+ if (test_vec.cipher_key.data == NULL)
+ return -1;
+ if (test_vec.cipher_key.length != opts.cipher_key_sz)
+ return -1;
+ }
+ } else if (opts.op_type == CPERF_AUTH_ONLY) {
+ if (opts.auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ if (test_vec.plaintext.data == NULL)
+ return -1;
+ if (test_vec.plaintext.length != opts.buffer_sz)
+ return -1;
+ if (test_vec.auth_key.data == NULL)
+ return -1;
+ if (test_vec.auth_key.length != opts.auth_key_sz)
+ return -1;
+ if (test_vec.digest.data == NULL)
+ return -1;
+ if (test_vec.digest.length != opts.auth_digest_sz)
+ return -1;
+ }
+
+ } else if (opts.op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts.op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (opts.cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec.plaintext.data == NULL)
+ return -1;
+ if (test_vec.plaintext.length != opts.buffer_sz)
+ return -1;
+ } else if (opts.cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec.plaintext.data == NULL)
+ return -1;
+ if (test_vec.plaintext.length != opts.buffer_sz)
+ return -1;
+ if (test_vec.ciphertext.data == NULL)
+ return -1;
+ if (test_vec.ciphertext.length != opts.buffer_sz)
+ return -1;
+ if (test_vec.iv.data == NULL)
+ return -1;
+ if (test_vec.iv.length != opts.cipher_iv_sz)
+ return -1;
+ if (test_vec.cipher_key.data == NULL)
+ return -1;
+ if (test_vec.cipher_key.length != opts.cipher_key_sz)
+ return -1;
+ }
+ if (opts.auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ if (test_vec.auth_key.data == NULL)
+ return -1;
+ if (test_vec.auth_key.length != opts.auth_key_sz)
+ return -1;
+ if (test_vec.digest.data == NULL)
+ return -1;
+ if (test_vec.digest.length != opts.auth_digest_sz)
+ return -1;
+ }
+ } else if (opts.op_type == CPERF_AEAD) {
+ if (test_vec.plaintext.data == NULL)
+ return -1;
+ if (test_vec.plaintext.length != opts.buffer_sz)
+ return -1;
+ if (test_vec.aad.data == NULL)
+ return -1;
+ if (test_vec.aad.length != opts.auth_aad_sz)
+ return -1;
+ if (test_vec.digest.data == NULL)
+ return -1;
+ if (test_vec.digest.length != opts.auth_digest_sz)
+ return -1;
+ }
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct cperf_options opts = {0};
+ struct cperf_test_vector *t_vec = NULL;
+ struct cperf_op_fns op_fns;
+
+ void *ctx[RTE_MAX_LCORE] = { };
+
+ int nb_cryptodevs = 0;
+ uint8_t cdev_id, i;
+ uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+
+ int ret;
+ uint32_t lcore_id;
+
+ /* Initialise DPDK EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
+ argc -= ret;
+ argv += ret;
+
+ cperf_options_default(&opts);
+
+ ret = cperf_options_parse(&opts, argc, argv);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
+ goto err;
+ }
+
+ ret = cperf_options_check(&opts);
+ if (ret) {
+ RTE_LOG(ERR, USER1,
+ "Checking on or more user options failed\n");
+ goto err;
+ }
+
+ if (!opts.silent)
+ cperf_options_dump(&opts);
+
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
+ if (nb_cryptodevs < 1) {
+ RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
+ "device type\n");
+ goto err;
+ }
+
+ ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
+ nb_cryptodevs);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Crypto device type does not support "
+ "capabilities requested\n");
+ goto err;
+ }
+
+ if (opts.test_file != NULL) {
+ t_vec = cperf_test_vector_get_from_file(&opts);
+ if (t_vec == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Failed to create test vector for"
+ " specified file\n");
+ goto err;
+ }
+
+ if (cperf_check_test_vector(opts, *t_vec)) {
+ RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
+ "\n");
+ goto err;
+ }
+ } else {
+ t_vec = cperf_test_vector_get_dummy(&opts);
+ if (t_vec == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Failed to create test vector for"
+ " specified algorithms\n");
+ goto err;
+ }
+ }
+
+ ret = cperf_get_op_functions(&opts, &op_fns);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Failed to find function ops set for "
+ "specified algorithms combination\n");
+ goto err;
+ }
+
+ if (!opts.silent)
+ show_test_vector(t_vec);
+
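+ /* Create one test context per enabled crypto device, one slave lcore each */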
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
+ &opts, t_vec, &op_fns);
+ if (ctx[cdev_id] == NULL) {
+ RTE_LOG(ERR, USER1, "Test run constructor failed\n");
+ goto err;
+ }
+ i++;
+ }
+
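+ /* Launch the test runner for each context on its slave lcore */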
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+ ctx[cdev_id], lcore_id);
+ i++;
+ }
+
+ rte_eal_mp_wait_lcore();
+
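+ /* All runners have finished; destroy the per-device contexts */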
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ i++;
+ }
+
+ free_test_vector(t_vec, &opts);
+
+ printf("\n");
+ return EXIT_SUCCESS;
+
+err:
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ if (ctx[cdev_id] && cperf_testmap[opts.test].destructor)
+ cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ i++;
+ }
+
+ free_test_vector(t_vec, &opts);
+
+ printf("\n");
+ return EXIT_FAILURE;
+}
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
+
+#
+# Compile the crypto performance application
+#
+CONFIG_RTE_APP_CRYPTO_PERF=y