/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"
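
/*
 * Per-lcore object cache size used when creating the session mempools
 * below; the cache keeps session allocation and free off the shared
 * mempool ring on the fast path.
 */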
#define SESS_MEMPOOL_CACHE_SIZE 64

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};
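
/*
 * Dispatch table mapping each test type to its constructor, runner and
 * destructor; main() drives every test exclusively through this table.
 */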
const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};

static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if more of them are
	 * available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;
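
	/*
	 * The shared session mempool element must be large enough for the
	 * biggest private session of any device present, so scan all
	 * devices for the maximum private session size.
	 */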
	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of needed queue pairs, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * with 4 cores and 2 crypto devices, 2 queue pairs will be set up
	 * per device. The expression below is a ceiling division.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}

		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the min headroom and tailroom
		 * requirement for the crypto PMD. This needs to be honoured
		 * by the application while creating the mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = 2 * enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = 2 * enabled_cdev_count *
				opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}
		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);
			sess_mp = rte_mempool_create(mp_name,
						sessions_needed,
						max_sess_size,
						SESS_MEMPOOL_CACHE_SIZE,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);

			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id,
				session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
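
/*
 * Verify that every enabled device supports the requested algorithms
 * and the key, IV, digest and AAD sizes given on the command line,
 * using the cryptodev symmetric capability query API.
 */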
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
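
/*
 * Check that a user-supplied test vector provides every field required
 * by the selected operation type; returns 0 on success, -1 otherwise.
 */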
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}
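
/*
 * Example invocation (illustrative only; see the test-crypto-perf
 * documentation, doc/guides/tools/cryptoperf.rst, for the full and
 * authoritative option list):
 *
 *   dpdk-test-crypto-perf -l 0-3 -- --ptest throughput \
 *       --devtype crypto_aesni_mb --optype cipher-only \
 *       --cipher-algo aes-cbc --buffer-sz 64 --burst-sz 32
 */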
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;

	void *ctx[RTE_MAX_LCORE] = { };
	struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
			session_pool_socket);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Test vector is missing "
					"required fields\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;
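
	/*
	 * Build one test context per queue pair and bind each to a worker
	 * lcore; queue pairs rotate within a device before moving on to
	 * the next device.
	 */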
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id], cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}
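
	/*
	 * IMIX mode: pre-compute a random per-operation buffer size
	 * sequence that follows the distribution requested by the user.
	 */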
	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to allocate IMIX buffer size list\n");
			goto err;
		}

		/*
		 * Calculate the accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];
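
		/*
		 * Example: sizes {64, 1500} with weights {9, 1} give
		 * cumulative totals {9, 10}; a random draw in [0, 10) then
		 * picks 64 with probability 9/10 and 1500 with
		 * probability 1/10.
		 */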

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
			distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get the first size from the range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];
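
		/*
		 * Sweep every requested buffer size: launch one runner per
		 * queue pair, wait for all of them to finish, then advance
		 * to the next size.
		 */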
		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get the next size from the range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;
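
/*
 * Error path: tear down any contexts already constructed, stop the
 * crypto devices and release test resources before exiting.
 */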
err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}