/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

#define SESS_MEMPOOL_CACHE_SIZE 64
const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};
const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};
const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};
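
/*
 * Illustrative sketch: because the table above uses designated
 * initializers, the test type parsed from the command line indexes it
 * directly and no switch statement is needed. main() below does the
 * equivalent of:
 *
 *	const struct cperf_test *test = &cperf_testmap[opts.test];
 *	void *ctx = test->constructor(sess_mp, cdev_id, qp_id,
 *			&opts, t_vec, &op_fns);
 *	rte_eal_remote_launch(test->runner, ctx, lcore_id);
 *	...
 *	test->destructor(ctx);
 */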
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/* Use fewer devices if more are available than cores. */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;
	/* Create a session mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * with 4 cores and 2 crypto devices, 2 queue pairs are set up
	 * per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;
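
	/*
	 * Worked example (hypothetical counts): with 7 lcores, one is
	 * reserved as the main lcore, so nb_lcores = 6. With 4 enabled
	 * devices, 6 % 4 != 0 gives nb_qps = 6 / 4 + 1 = 2: rounding up
	 * guarantees every worker core gets a queue pair, even though
	 * some of the 4 * 2 = 8 queue pairs will then stay idle.
	 */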
	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * The device info specifies the minimum headroom and
		 * tailroom required by the crypto PMD. The application
		 * needs to honour this when creating its mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);
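
		/*
		 * Worked example (hypothetical sizes): for a requested
		 * 64-byte segment and a PMD reporting a minimum headroom
		 * of 32 and tailroom of 8, segment_sz becomes
		 * 64 + 32 + 8 = 104 bytes, so the payload itself still
		 * gets the full 64 bytes the user asked for.
		 */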
		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data).
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = 2 * enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = 2 * enabled_cdev_count *
				opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device.
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}
		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);
			sess_mp = rte_mempool_create(mp_name, sessions_needed,
					max_sess_size, SESS_MEMPOOL_CACHE_SIZE,
					0, NULL, NULL, NULL, NULL,
					socket_id, 0);
			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}
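
		/*
		 * Design note: keeping one session pool per NUMA socket
		 * (indexed by socket_id) keeps session objects local to
		 * the devices that use them; devices on the same socket
		 * simply reuse the pool created for the first of them.
		 */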
		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
					&qp_conf, socket_id,
					session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}
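
		/*
		 * Worked example (hypothetical parameters): for an
		 * auth-only run with SHA1-HMAC, cap_idx selects
		 * RTE_CRYPTO_SYM_XFORM_AUTH / RTE_CRYPTO_AUTH_SHA1_HMAC,
		 * and the check above passes only if the PMD's capability
		 * table covers the requested auth key, digest and IV sizes
		 * (e.g. a 64-byte key, 20-byte digest, no IV).
		 */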
		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}
		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz &&
					test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz &&
					test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;

	void *ctx[RTE_MAX_LCORE] = { };
	struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;
	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
			session_pool_socket);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}
	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}
	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id], cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}

		/* Round-robin the queue pairs, moving to the next device
		 * once each of its queue pairs has an lcore attached. */
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}
	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list =
				opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		/*
		 * Calculate the accumulated distribution of
		 * probabilities per packet size.
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate the average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
			distribution_total[buffer_size_count - 1];
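
		/*
		 * Worked example (hypothetical sizes): buffer sizes
		 * {64, 512, 1500} with distribution {30, 60, 10} give
		 * distribution_total = {30, 90, 100}; a draw in [0, 100)
		 * selects 64, 512 or 1500 with probability 30%, 60% or
		 * 10%, and the reported average buffer size is
		 * (64*30 + 512*60 + 1500*10) / 100 = 476 bytes.
		 */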
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get the first size from the range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];
		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
						ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get the next size from the range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;
err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}