/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"
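
/*
 * Per-NUMA-socket mempools shared by all devices on that socket: sess_mp
 * holds the session objects, priv_mp the PMD-private session data.
 */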
static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis",
	[CPERF_IPSEC] = "ipsec",
	[CPERF_ASYM_MODEX] = "modex"
};
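
/*
 * Dispatch table mapping each test type to its constructor, runner and
 * destructor, indexed by the CPERF_TEST_TYPE_* value selected on the
 * command line.
 */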
const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};
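
/*
 * Create, once per socket, the mempools used for asymmetric sessions.
 * Both pools are sized to the larger of the device's private session
 * size and the session header size.
 */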
static int
create_asym_op_pool_socket(uint8_t dev_id, int32_t socket_id,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mpool = NULL;
	unsigned int session_size =
		RTE_MAX(rte_cryptodev_asym_get_private_session_size(dev_id),
			rte_cryptodev_asym_get_header_session_size());

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"perf_asym_priv_pool%u", socket_id);

		mpool = rte_mempool_create(mp_name, nb_sessions, session_size,
				0, 0, NULL, NULL, NULL, NULL, socket_id, 0);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
					mp_name, socket_id);
			return -ENOMEM;
		}
		printf("Allocated pool \"%s\" on socket %d\n", mp_name,
				socket_id);
		session_pool_socket[socket_id].priv_mp = mpool;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"perf_asym_sess_pool%u", socket_id);
		mpool = rte_mempool_create(mp_name, nb_sessions,
				session_size, 0, 0, NULL, NULL, NULL,
				NULL, socket_id, 0);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
					mp_name, socket_id);
			return -ENOMEM;
		}
		session_pool_socket[socket_id].sess_mp = mpool;
	}

	return 0;
}
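
/*
 * Create, once per socket, the symmetric session pools: a plain mempool
 * for the PMD-private session data and a cryptodev session pool for the
 * session headers.
 */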
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"priv_sess_mp_%u", socket_id);

		sess_mp = rte_mempool_create(mp_name,
				nb_sessions, session_priv_size,
				0, 0, NULL, NULL, NULL,
				NULL, socket_id, 0);
		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
					mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
				mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
				nb_sessions, 0, 0, 0, socket_id);
		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
					mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
				mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}
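
/*
 * Probe the crypto devices of the requested type (opts->device_type),
 * derive the number of queue pairs per device from the available worker
 * lcores, create the per-socket session pools, then configure and start
 * each device.
 */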
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;
	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if there are more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#ifdef RTE_LIB_SECURITY
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_security_session_get_size(
				rte_cryptodev_get_sec_ctx(cdev_id));
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#endif

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * with 4 cores and 2 crypto devices, 2 queue pairs are set up
	 * per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
			(nb_lcores / enabled_cdev_count) + 1 :
			nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* Range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value.
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (opts->op_type == CPERF_ASYM_MODEX) {
			if ((cdev_info.feature_flags &
					RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
				continue;
		}

		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}

		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
		};

		switch (opts->op_type) {
		case CPERF_ASYM_MODEX:
			conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
					RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
			break;
		case CPERF_CIPHER_ONLY:
		case CPERF_AUTH_ONLY:
		case CPERF_CIPHER_THEN_AUTH:
		case CPERF_AUTH_THEN_CIPHER:
		case CPERF_AEAD:
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
			/* Fall through */
		case CPERF_PDCP:
		case CPERF_DOCSIS:
		case CPERF_IPSEC:
			conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
			break;
		}

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the min headroom and tailroom
		 * requirements for the crypto PMD. These need to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
				"crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_workers_get(cdev_id,
						NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count * opts->nb_qps;

		/* A single session is required per queue pair. */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		if (opts->op_type == CPERF_ASYM_MODEX)
			ret = create_asym_op_pool_socket(cdev_id, socket_id,
					sessions_needed);
		else
			ret = fill_session_pool_socket(socket_id, max_sess_size,
					sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		if (opts->op_type == CPERF_ASYM_MODEX) {
			qp_conf.mp_session = NULL;
			qp_conf.mp_session_private = NULL;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
					&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
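
/*
 * Check that every enabled device advertises the capabilities (algorithm,
 * key, digest, AAD and IV sizes) required by the selected operation type.
 */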
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;
	struct rte_cryptodev_asym_capability_idx asym_cap_idx;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_ASYM_MODEX) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
			asym_capability = rte_cryptodev_asym_capability_get(
					cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			ret = rte_cryptodev_asym_xform_capability_check_modlen(
					asym_capability, sizeof(perf_mod_p));
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability, opts->auth_key_sz,
					opts->digest_sz, opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability, opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability, opts->aead_key_sz,
					opts->digest_sz, opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
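
/*
 * Verify that the test vector carries every field (keys, IVs, plaintext,
 * ciphertext, digest, AAD) required by the selected operation type and
 * buffer sizes; returns 0 on success, -1 on a missing or mis-sized field.
 */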
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}

	return 0;
}
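
/*
 * Entry point: initialise the EAL, parse and validate options, set up the
 * crypto devices, load or generate the test vector, build one test context
 * per queue pair and run the selected test on the worker lcores.
 */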
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
	uint8_t buffer_size_idx = 0;
	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;
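
	/*
	 * Build one test context per queue pair: queue pairs are assigned to
	 * worker lcores and distributed across the enabled devices in a
	 * round-robin fashion.
	 */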
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}

		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
				sizeof(uint32_t) * opts.pool_sz,
				0);
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
					distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
					imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];
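
		/*
		 * Launch the runner for this test on every worker lcore,
		 * then wait for all of them to finish.
		 */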
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {
			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {
			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {
		/* Get first size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {
				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
						ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {
				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
						opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;
		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}
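
	/* Stop and close every crypto device that was configured above. */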
	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;
		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}