/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"
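
/* Session mempools, one pair per NUMA socket and shared by all devices
 * on that socket: sess_mp holds session headers, priv_mp the PMD
 * private session data.
 */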
static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis",
	[CPERF_ASYM_MODEX] = "modex"
};

const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};
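
/*
 * Create the asymmetric session mempools for a socket on first use.
 * Both pools are sized to the larger of the device's private session
 * size and the session header size.
 */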
static int
create_asym_op_pool_socket(uint8_t dev_id, int32_t socket_id,
			   uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mpool = NULL;
	unsigned int session_size =
		RTE_MAX(rte_cryptodev_asym_get_private_session_size(dev_id),
			rte_cryptodev_asym_get_header_session_size());

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_priv_pool%u",
			 socket_id);
		mpool = rte_mempool_create(mp_name, nb_sessions, session_size,
					   0, 0, NULL, NULL, NULL, NULL,
					   socket_id, 0);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
			       mp_name, socket_id);
			return -ENOMEM;
		}
		printf("Allocated pool \"%s\" on socket %d\n", mp_name,
		       socket_id);
		session_pool_socket[socket_id].priv_mp = mpool;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_sess_pool%u",
			 socket_id);
		mpool = rte_mempool_create(mp_name, nb_sessions,
					   session_size, 0, 0, NULL, NULL, NULL,
					   NULL, socket_id, 0);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
			       mp_name, socket_id);
			return -ENOMEM;
		}
		session_pool_socket[socket_id].sess_mp = mpool;
	}
	return 0;
}
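
/*
 * Create the symmetric session mempools for a socket on first use:
 * a raw mempool for the PMD private session data and a cryptodev
 * session pool for the session headers.
 */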
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%u", socket_id);

		sess_mp = rte_mempool_create(mp_name,
				nb_sessions, session_priv_size,
				0, 0, NULL, NULL, NULL,
				NULL, socket_id, 0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%u", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
				nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}
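
/*
 * Probe the requested crypto device type, size the session pools, and
 * configure and start each enabled device with opts->nb_qps queue
 * pairs. Returns the number of enabled devices, or a negative value
 * on error.
 */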
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices,
	 * if there are more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#ifdef RTE_LIB_SECURITY
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_security_session_get_size(
				rte_cryptodev_get_sec_ctx(cdev_id));
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#endif
	/*
	 * Calculate the number of needed queue pairs, based on the number
	 * of available logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (opts->op_type == CPERF_ASYM_MODEX) {
			if ((cdev_info.feature_flags &
					RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
				continue;
		}

		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}

		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
		};

		switch (opts->op_type) {
		case CPERF_ASYM_MODEX:
			conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
					RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
			break;
		case CPERF_CIPHER_ONLY:
		case CPERF_AUTH_ONLY:
		case CPERF_CIPHER_THEN_AUTH:
		case CPERF_AUTH_THEN_CIPHER:
		case CPERF_AEAD:
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
			/* Fall through */
		case CPERF_PDCP:
		case CPERF_DOCSIS:
		default:
			conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
		}

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the min headroom and tailroom
		 * requirement for the crypto PMD. This needs to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_workers_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count * opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		if (opts->op_type == CPERF_ASYM_MODEX)
			ret = create_asym_op_pool_socket(cdev_id, socket_id,
							sessions_needed);
		else
			ret = fill_session_pool_socket(socket_id, max_sess_size,
							sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		if (opts->op_type == CPERF_ASYM_MODEX) {
			qp_conf.mp_session = NULL;
			qp_conf.mp_session_private = NULL;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
					&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
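
/*
 * Verify that each enabled device supports the algorithms, key, digest,
 * IV and AAD sizes required by the selected operation type.
 */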
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;
	struct rte_cryptodev_asym_capability_idx asym_cap_idx;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_ASYM_MODEX) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
			asym_capability = rte_cryptodev_asym_capability_get(
					cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			ret = rte_cryptodev_asym_xform_capability_check_modlen(
					asym_capability, sizeof(perf_mod_p));
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
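
/*
 * Check that a parsed test vector contains every field (data, keys,
 * IVs, digest) required by the selected operation type, with lengths
 * matching the configured sizes.
 */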
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}
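
/*
 * Application entry point: parse EAL and app options, initialize the
 * crypto devices, build or load the test vector, run one test instance
 * per queue pair on the worker lcores, then tear everything down.
 * Illustrative invocation (option names follow this app's parser; the
 * exact binary path depends on the build):
 *
 *   ./dpdk-test-crypto-perf -l 0-3 --vdev crypto_aesni_mb -- \
 *       --ptest throughput --devtype crypto_aesni_mb \
 *       --optype cipher-then-auth --buffer-sz 64,256,1024
 */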
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}

		/* Advance to the next queue pair, moving to the next device
		 * once every queue pair on the current one has a context.
		 */
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}
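
	/*
	 * With an IMIX distribution, pre-compute a per-op buffer size
	 * sequence: build the cumulative distribution of the user weights,
	 * then sample it once for every op in the pool.
	 */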
	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}

	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}