/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

#include <rte_eal.h>
#include <rte_cryptodev.h>

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
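
/* Human-readable names for each supported test type, indexed by
 * enum cperf_test_type.
 */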
const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify"
};
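
/* Human-readable names for each symmetric operation chain, indexed by
 * enum cperf_op_type.
 */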
const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};
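
/* Constructor, runner and destructor for each test type; the runner is
 * the function launched on each slave lcore.
 */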
const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	}
};
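
/*
 * Configure every crypto device of the requested type, give each a single
 * queue pair, and start it. Returns the number of devices enabled, or a
 * negative errno value on failure.
 */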
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (enabled_cdev_count > nb_lcores) {
		printf("Number of capable crypto devices (%d) "
			"has to be less than or equal to the number of "
			"slave cores (%d)\n", enabled_cdev_count, nb_lcores);
		return -EINVAL;
	}

	for (cdev_id = 0; cdev_id < enabled_cdev_count &&
			cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {

		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = 1,
			.socket_id = SOCKET_ID_ANY,
			.session_mp = {
				.nb_objs = 2048,
				.cache_size = 64
			}
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = 2048
		};

		ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n",
					enabled_cdevs[cdev_id]);
			return -EINVAL;
		}

		ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
				&qp_conf, SOCKET_ID_ANY);
		if (ret < 0) {
			printf("Failed to setup queue pair %u on "
				"cryptodev %u\n", 0, enabled_cdevs[cdev_id]);
			return -EINVAL;
		}

		ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					enabled_cdevs[cdev_id], ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
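
/*
 * Verify that every enabled device supports the requested cipher and/or
 * auth algorithm with the key, digest, AAD and IV sizes supplied on the
 * command line.
 */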
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER ||
				opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->auth_digest_sz,
					opts->auth_aad_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER ||
				opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
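
/*
 * Check that a user-supplied test vector provides every field, at the
 * required length, for the selected operation chain.
 */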
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->auth_digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->auth_digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->cipher_iv.data == NULL)
			return -1;
		if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->auth_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->auth_digest_sz)
			return -1;
	}

	return 0;
}
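
/*
 * Entry point: parse options, bring up and sanity-check the crypto devices,
 * load or generate a test vector, then run the selected test over every
 * requested buffer size, one device per slave lcore.
 */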
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
	uint8_t buffer_size_idx = 0;
	uint32_t lcore_id;
	int ret;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;
	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	if (!opts.silent)
		cperf_options_dump(&opts);
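
	/* Bring up every crypto device of the requested type; there must be
	 * at least one device and no more devices than slave lcores.
	 */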
	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		goto err;
	}
	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1,
					"Necessary test vectors are incomplete\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}
	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"the specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);
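
	/* Build one test context per enabled device; each context is later
	 * driven by its own slave lcore.
	 */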
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == nb_cryptodevs)
			break;

		cdev_id = enabled_cdevs[i];

		ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
				&opts, t_vec, &op_fns);
		if (ctx[cdev_id] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		i++;
	}
	/* Get first size from range or list */
	if (opts.inc_buffer_size != 0)
		opts.test_buffer_size = opts.min_buffer_size;
	else
		opts.test_buffer_size = opts.buffer_size_list[0];
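
	/* Sweep the buffer sizes: launch one runner per device on its own
	 * slave lcore, wait for all of them, then move to the next size.
	 */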
	while (opts.test_buffer_size <= opts.max_buffer_size) {
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == nb_cryptodevs)
				break;

			cdev_id = enabled_cdevs[i];

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[cdev_id], lcore_id);
			i++;
		}
		rte_eal_mp_wait_lcore();

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size += opts.inc_buffer_size;
		else {
			if (++buffer_size_idx == opts.buffer_size_count)
				break;
			opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
		}
	}
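
	/* All buffer sizes done: tear down the per-device test contexts. */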
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == nb_cryptodevs)
			break;

		cdev_id = enabled_cdevs[i];

		cperf_testmap[opts.test].destructor(ctx[cdev_id]);
		i++;
	}

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;
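
	/* Error path: destroy any contexts already constructed, free the
	 * test vector and exit non-zero.
	 */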
err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == nb_cryptodevs)
			break;

		cdev_id = enabled_cdevs[i];

		if (ctx[cdev_id] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[cdev_id]);
		i++;
	}
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}