74e2165a43c84b01b26b1d247933c5489e70712e
[dpdk.git] / app / test-crypto-perf / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <unistd.h>
7
8 #include <rte_malloc.h>
9 #include <rte_random.h>
10 #include <rte_eal.h>
11 #include <rte_cryptodev.h>
12 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
13 #include <rte_cryptodev_scheduler.h>
14 #endif
15
16 #include "cperf.h"
17 #include "cperf_options.h"
18 #include "cperf_test_vector_parsing.h"
19 #include "cperf_test_throughput.h"
20 #include "cperf_test_latency.h"
21 #include "cperf_test_verify.h"
22 #include "cperf_test_pmd_cyclecount.h"
23
24 #define SESS_MEMPOOL_CACHE_SIZE 64
25
/* Human-readable name for each test type, indexed by enum cperf_test_type. */
const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};
32
/* Human-readable name for each crypto operation chain, indexed by enum cperf_op_type. */
const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};
40
/*
 * Dispatch table mapping each test type to its implementation.
 * The positional initializers are, in order: constructor, runner,
 * destructor (see struct cperf_test in cperf.h for the field layout).
 */
const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};
63
/*
 * Discover, configure and start the crypto devices of the requested type.
 *
 * Caps the number of used devices at the number of worker lcores, derives
 * opts->nb_qps so that every worker lcore gets one queue pair, creates one
 * session mempool per NUMA socket (shared by the devices on that socket),
 * then configures, sets up queue pairs for, and starts each device.
 *
 * On success returns the number of enabled devices (> 0), with their ids in
 * enabled_cdevs[] and the per-socket pools in session_pool_socket[].
 * Returns a negative errno-style value on failure; devices already started
 * are not stopped here (the caller's error path stops them).
 */
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	/* The master lcore does not run a worker; only the rest count. */
	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use less number of devices,
	 * if there are more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	/*
	 * Size the pool element for the largest private session of any
	 * device present (not just the enabled ones), so one pool fits all.
	 */
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount
	 * of available number of logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two sessions objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = 2 * enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
			/*
			 * NOTE(review): if the scheduler PMD support is
			 * compiled out, sessions_needed stays 0 here and the
			 * mempool below would be created with 0 elements —
			 * confirm this combination cannot occur in practice.
			 */
		} else
			sessions_needed = 2 * enabled_cdev_count *
						opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}
		/*
		 * NOTE(review): the pool is created only the first time a
		 * socket is seen, sized from sessions_needed as computed for
		 * that device; verify later devices on the same socket never
		 * need a larger pool.
		 */
		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);
			sess_mp = rte_mempool_create(mp_name,
						sessions_needed,
						max_sess_size,
						SESS_MEMPOOL_CACHE_SIZE,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);

			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		/* One queue pair per worker lcore assigned to this device. */
		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id,
				session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
232
233 static int
234 cperf_verify_devices_capabilities(struct cperf_options *opts,
235                 uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
236 {
237         struct rte_cryptodev_sym_capability_idx cap_idx;
238         const struct rte_cryptodev_symmetric_capability *capability;
239
240         uint8_t i, cdev_id;
241         int ret;
242
243         for (i = 0; i < nb_cryptodevs; i++) {
244
245                 cdev_id = enabled_cdevs[i];
246
247                 if (opts->op_type == CPERF_AUTH_ONLY ||
248                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
249                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
250
251                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
252                         cap_idx.algo.auth = opts->auth_algo;
253
254                         capability = rte_cryptodev_sym_capability_get(cdev_id,
255                                         &cap_idx);
256                         if (capability == NULL)
257                                 return -1;
258
259                         ret = rte_cryptodev_sym_capability_check_auth(
260                                         capability,
261                                         opts->auth_key_sz,
262                                         opts->digest_sz,
263                                         opts->auth_iv_sz);
264                         if (ret != 0)
265                                 return ret;
266                 }
267
268                 if (opts->op_type == CPERF_CIPHER_ONLY ||
269                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
270                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
271
272                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
273                         cap_idx.algo.cipher = opts->cipher_algo;
274
275                         capability = rte_cryptodev_sym_capability_get(cdev_id,
276                                         &cap_idx);
277                         if (capability == NULL)
278                                 return -1;
279
280                         ret = rte_cryptodev_sym_capability_check_cipher(
281                                         capability,
282                                         opts->cipher_key_sz,
283                                         opts->cipher_iv_sz);
284                         if (ret != 0)
285                                 return ret;
286                 }
287
288                 if (opts->op_type == CPERF_AEAD) {
289
290                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
291                         cap_idx.algo.aead = opts->aead_algo;
292
293                         capability = rte_cryptodev_sym_capability_get(cdev_id,
294                                         &cap_idx);
295                         if (capability == NULL)
296                                 return -1;
297
298                         ret = rte_cryptodev_sym_capability_check_aead(
299                                         capability,
300                                         opts->aead_key_sz,
301                                         opts->digest_sz,
302                                         opts->aead_aad_sz,
303                                         opts->aead_iv_sz);
304                         if (ret != 0)
305                                 return ret;
306                 }
307         }
308
309         return 0;
310 }
311
312 static int
313 cperf_check_test_vector(struct cperf_options *opts,
314                 struct cperf_test_vector *test_vec)
315 {
316         if (opts->op_type == CPERF_CIPHER_ONLY) {
317                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
318                         if (test_vec->plaintext.data == NULL)
319                                 return -1;
320                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
321                         if (test_vec->plaintext.data == NULL)
322                                 return -1;
323                         if (test_vec->plaintext.length < opts->max_buffer_size)
324                                 return -1;
325                         if (test_vec->ciphertext.data == NULL)
326                                 return -1;
327                         if (test_vec->ciphertext.length < opts->max_buffer_size)
328                                 return -1;
329                         if (test_vec->cipher_iv.data == NULL)
330                                 return -1;
331                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
332                                 return -1;
333                         if (test_vec->cipher_key.data == NULL)
334                                 return -1;
335                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
336                                 return -1;
337                 }
338         } else if (opts->op_type == CPERF_AUTH_ONLY) {
339                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
340                         if (test_vec->plaintext.data == NULL)
341                                 return -1;
342                         if (test_vec->plaintext.length < opts->max_buffer_size)
343                                 return -1;
344                         if (test_vec->auth_key.data == NULL)
345                                 return -1;
346                         if (test_vec->auth_key.length != opts->auth_key_sz)
347                                 return -1;
348                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
349                                 return -1;
350                         /* Auth IV is only required for some algorithms */
351                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
352                                 return -1;
353                         if (test_vec->digest.data == NULL)
354                                 return -1;
355                         if (test_vec->digest.length < opts->digest_sz)
356                                 return -1;
357                 }
358
359         } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
360                         opts->op_type == CPERF_AUTH_THEN_CIPHER) {
361                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
362                         if (test_vec->plaintext.data == NULL)
363                                 return -1;
364                         if (test_vec->plaintext.length < opts->max_buffer_size)
365                                 return -1;
366                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
367                         if (test_vec->plaintext.data == NULL)
368                                 return -1;
369                         if (test_vec->plaintext.length < opts->max_buffer_size)
370                                 return -1;
371                         if (test_vec->ciphertext.data == NULL)
372                                 return -1;
373                         if (test_vec->ciphertext.length < opts->max_buffer_size)
374                                 return -1;
375                         if (test_vec->cipher_iv.data == NULL)
376                                 return -1;
377                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
378                                 return -1;
379                         if (test_vec->cipher_key.data == NULL)
380                                 return -1;
381                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
382                                 return -1;
383                 }
384                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
385                         if (test_vec->auth_key.data == NULL)
386                                 return -1;
387                         if (test_vec->auth_key.length != opts->auth_key_sz)
388                                 return -1;
389                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
390                                 return -1;
391                         /* Auth IV is only required for some algorithms */
392                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
393                                 return -1;
394                         if (test_vec->digest.data == NULL)
395                                 return -1;
396                         if (test_vec->digest.length < opts->digest_sz)
397                                 return -1;
398                 }
399         } else if (opts->op_type == CPERF_AEAD) {
400                 if (test_vec->plaintext.data == NULL)
401                         return -1;
402                 if (test_vec->plaintext.length < opts->max_buffer_size)
403                         return -1;
404                 if (test_vec->ciphertext.data == NULL)
405                         return -1;
406                 if (test_vec->ciphertext.length < opts->max_buffer_size)
407                         return -1;
408                 if (test_vec->aead_iv.data == NULL)
409                         return -1;
410                 if (test_vec->aead_iv.length != opts->aead_iv_sz)
411                         return -1;
412                 if (test_vec->aad.data == NULL)
413                         return -1;
414                 if (test_vec->aad.length != opts->aead_aad_sz)
415                         return -1;
416                 if (test_vec->digest.data == NULL)
417                         return -1;
418                 if (test_vec->digest.length < opts->digest_sz)
419                         return -1;
420         }
421         return 0;
422 }
423
424 int
425 main(int argc, char **argv)
426 {
427         struct cperf_options opts = {0};
428         struct cperf_test_vector *t_vec = NULL;
429         struct cperf_op_fns op_fns;
430
431         void *ctx[RTE_MAX_LCORE] = { };
432         struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
433
434         int nb_cryptodevs = 0;
435         uint16_t total_nb_qps = 0;
436         uint8_t cdev_id, i;
437         uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
438
439         uint8_t buffer_size_idx = 0;
440
441         int ret;
442         uint32_t lcore_id;
443
444         /* Initialise DPDK EAL */
445         ret = rte_eal_init(argc, argv);
446         if (ret < 0)
447                 rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
448         argc -= ret;
449         argv += ret;
450
451         cperf_options_default(&opts);
452
453         ret = cperf_options_parse(&opts, argc, argv);
454         if (ret) {
455                 RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
456                 goto err;
457         }
458
459         ret = cperf_options_check(&opts);
460         if (ret) {
461                 RTE_LOG(ERR, USER1,
462                                 "Checking on or more user options failed\n");
463                 goto err;
464         }
465
466         nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
467                         session_pool_socket);
468
469         if (!opts.silent)
470                 cperf_options_dump(&opts);
471
472         if (nb_cryptodevs < 1) {
473                 RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
474                                 "device type\n");
475                 nb_cryptodevs = 0;
476                 goto err;
477         }
478
479         ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
480                         nb_cryptodevs);
481         if (ret) {
482                 RTE_LOG(ERR, USER1, "Crypto device type does not support "
483                                 "capabilities requested\n");
484                 goto err;
485         }
486
487         if (opts.test_file != NULL) {
488                 t_vec = cperf_test_vector_get_from_file(&opts);
489                 if (t_vec == NULL) {
490                         RTE_LOG(ERR, USER1,
491                                         "Failed to create test vector for"
492                                         " specified file\n");
493                         goto err;
494                 }
495
496                 if (cperf_check_test_vector(&opts, t_vec)) {
497                         RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
498                                         "\n");
499                         goto err;
500                 }
501         } else {
502                 t_vec = cperf_test_vector_get_dummy(&opts);
503                 if (t_vec == NULL) {
504                         RTE_LOG(ERR, USER1,
505                                         "Failed to create test vector for"
506                                         " specified algorithms\n");
507                         goto err;
508                 }
509         }
510
511         ret = cperf_get_op_functions(&opts, &op_fns);
512         if (ret) {
513                 RTE_LOG(ERR, USER1, "Failed to find function ops set for "
514                                 "specified algorithms combination\n");
515                 goto err;
516         }
517
518         if (!opts.silent)
519                 show_test_vector(t_vec);
520
521         total_nb_qps = nb_cryptodevs * opts.nb_qps;
522
523         i = 0;
524         uint8_t qp_id = 0, cdev_index = 0;
525         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
526
527                 if (i == total_nb_qps)
528                         break;
529
530                 cdev_id = enabled_cdevs[cdev_index];
531
532                 uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
533
534                 ctx[i] = cperf_testmap[opts.test].constructor(
535                                 session_pool_socket[socket_id], cdev_id, qp_id,
536                                 &opts, t_vec, &op_fns);
537                 if (ctx[i] == NULL) {
538                         RTE_LOG(ERR, USER1, "Test run constructor failed\n");
539                         goto err;
540                 }
541                 qp_id = (qp_id + 1) % opts.nb_qps;
542                 if (qp_id == 0)
543                         cdev_index++;
544                 i++;
545         }
546
547         if (opts.imix_distribution_count != 0) {
548                 uint8_t buffer_size_count = opts.buffer_size_count;
549                 uint16_t distribution_total[buffer_size_count];
550                 uint32_t op_idx;
551                 uint32_t test_average_size = 0;
552                 const uint32_t *buffer_size_list = opts.buffer_size_list;
553                 const uint32_t *imix_distribution_list = opts.imix_distribution_list;
554
555                 opts.imix_buffer_sizes = rte_malloc(NULL,
556                                         sizeof(uint32_t) * opts.pool_sz,
557                                         0);
558                 /*
559                  * Calculate accumulated distribution of
560                  * probabilities per packet size
561                  */
562                 distribution_total[0] = imix_distribution_list[0];
563                 for (i = 1; i < buffer_size_count; i++)
564                         distribution_total[i] = imix_distribution_list[i] +
565                                 distribution_total[i-1];
566
567                 /* Calculate a random sequence of packet sizes, based on distribution */
568                 for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
569                         uint16_t random_number = rte_rand() %
570                                 distribution_total[buffer_size_count - 1];
571                         for (i = 0; i < buffer_size_count; i++)
572                                 if (random_number < distribution_total[i])
573                                         break;
574
575                         opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
576                 }
577
578                 /* Calculate average buffer size for the IMIX distribution */
579                 for (i = 0; i < buffer_size_count; i++)
580                         test_average_size += buffer_size_list[i] *
581                                 imix_distribution_list[i];
582
583                 opts.test_buffer_size = test_average_size /
584                                 distribution_total[buffer_size_count - 1];
585
586                 i = 0;
587                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
588
589                         if (i == total_nb_qps)
590                                 break;
591
592                         rte_eal_remote_launch(cperf_testmap[opts.test].runner,
593                                 ctx[i], lcore_id);
594                         i++;
595                 }
596                 i = 0;
597                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
598
599                         if (i == total_nb_qps)
600                                 break;
601                         rte_eal_wait_lcore(lcore_id);
602                         i++;
603                 }
604         } else {
605
606                 /* Get next size from range or list */
607                 if (opts.inc_buffer_size != 0)
608                         opts.test_buffer_size = opts.min_buffer_size;
609                 else
610                         opts.test_buffer_size = opts.buffer_size_list[0];
611
612                 while (opts.test_buffer_size <= opts.max_buffer_size) {
613                         i = 0;
614                         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
615
616                                 if (i == total_nb_qps)
617                                         break;
618
619                                 rte_eal_remote_launch(cperf_testmap[opts.test].runner,
620                                         ctx[i], lcore_id);
621                                 i++;
622                         }
623                         i = 0;
624                         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
625
626                                 if (i == total_nb_qps)
627                                         break;
628                                 rte_eal_wait_lcore(lcore_id);
629                                 i++;
630                         }
631
632                         /* Get next size from range or list */
633                         if (opts.inc_buffer_size != 0)
634                                 opts.test_buffer_size += opts.inc_buffer_size;
635                         else {
636                                 if (++buffer_size_idx == opts.buffer_size_count)
637                                         break;
638                                 opts.test_buffer_size =
639                                         opts.buffer_size_list[buffer_size_idx];
640                         }
641                 }
642         }
643
644         i = 0;
645         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
646
647                 if (i == total_nb_qps)
648                         break;
649
650                 cperf_testmap[opts.test].destructor(ctx[i]);
651                 i++;
652         }
653
654         for (i = 0; i < nb_cryptodevs &&
655                         i < RTE_CRYPTO_MAX_DEVS; i++)
656                 rte_cryptodev_stop(enabled_cdevs[i]);
657
658         free_test_vector(t_vec, &opts);
659
660         printf("\n");
661         return EXIT_SUCCESS;
662
663 err:
664         i = 0;
665         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
666                 if (i == total_nb_qps)
667                         break;
668
669                 if (ctx[i] && cperf_testmap[opts.test].destructor)
670                         cperf_testmap[opts.test].destructor(ctx[i]);
671                 i++;
672         }
673
674         for (i = 0; i < nb_cryptodevs &&
675                         i < RTE_CRYPTO_MAX_DEVS; i++)
676                 rte_cryptodev_stop(enabled_cdevs[i]);
677         rte_free(opts.imix_buffer_sizes);
678         free_test_vector(t_vec, &opts);
679
680         printf("\n");
681         return EXIT_FAILURE;
682 }