/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h> /* EXIT_SUCCESS, EXIT_FAILURE */
#include <string.h> /* strcmp */
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

#define SESS_MEMPOOL_CACHE_SIZE 64

const char *cperf_test_type_strs[] = {
        [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
        [CPERF_TEST_TYPE_LATENCY] = "latency",
        [CPERF_TEST_TYPE_VERIFY] = "verify",
        [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
        [CPERF_CIPHER_ONLY] = "cipher-only",
        [CPERF_AUTH_ONLY] = "auth-only",
        [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
        [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
        [CPERF_AEAD] = "aead"
};

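/*
 * Dispatch table indexed by test type; each entry bundles the test's
 * constructor, runner and destructor (see struct cperf_test).
 */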
const struct cperf_test cperf_testmap[] = {
                [CPERF_TEST_TYPE_THROUGHPUT] = {
                                cperf_throughput_test_constructor,
                                cperf_throughput_test_runner,
                                cperf_throughput_test_destructor
                },
                [CPERF_TEST_TYPE_LATENCY] = {
                                cperf_latency_test_constructor,
                                cperf_latency_test_runner,
                                cperf_latency_test_destructor
                },
                [CPERF_TEST_TYPE_VERIFY] = {
                                cperf_verify_test_constructor,
                                cperf_verify_test_runner,
                                cperf_verify_test_destructor
                },
                [CPERF_TEST_TYPE_PMDCC] = {
                                cperf_pmd_cyclecount_test_constructor,
                                cperf_pmd_cyclecount_test_runner,
                                cperf_pmd_cyclecount_test_destructor
                }
};

static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
                        struct rte_mempool *session_pool_socket[])
{
        uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
        uint32_t sessions_needed = 0;
        unsigned int i, j;
        int ret;

        enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
                        enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
        if (enabled_cdev_count == 0) {
                printf("No crypto devices of type %s available\n",
                                opts->device_type);
                return -EINVAL;
        }

        nb_lcores = rte_lcore_count() - 1;

        if (nb_lcores < 1) {
                RTE_LOG(ERR, USER1,
                        "Number of enabled cores needs to be higher than 1\n");
                return -EINVAL;
        }

        /*
         * Use fewer devices if there are more available
         * than logical cores.
         */
        if (enabled_cdev_count > nb_lcores)
                enabled_cdev_count = nb_lcores;

        /* Create a mempool shared by all the devices */
        uint32_t max_sess_size = 0, sess_size;

        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
                if (sess_size > max_sess_size)
                        max_sess_size = sess_size;
        }

        /*
         * Calculate the number of queue pairs needed, based on the
         * number of available logical cores and crypto devices. For
         * instance, if there are 4 cores and 2 crypto devices, 2 queue
         * pairs will be set up per device.
         */
        opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
                                (nb_lcores / enabled_cdev_count) + 1 :
                                nb_lcores / enabled_cdev_count;
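        /*
         * The expression above is a ceiling division, so leftover
         * lcores still get served: e.g. 5 worker lcores and 2 devices
         * yield 3 queue pairs per device.
         */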

        for (i = 0; i < enabled_cdev_count &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                /*
                 * If the multi-core scheduler is used, limit the number
                 * of queue pairs to 1, as there is no way to know
                 * how many cores are being used by the PMD, and
                 * how many will be available for the application.
                 */
                if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
                                rte_cryptodev_scheduler_mode_get(cdev_id) ==
                                CDEV_SCHED_MODE_MULTICORE)
                        opts->nb_qps = 1;
#endif

                struct rte_cryptodev_info cdev_info;
                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                rte_cryptodev_info_get(cdev_id, &cdev_info);
                if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
                        printf("Number of needed queue pairs is higher "
                                "than the maximum number of queue pairs "
                                "per device.\n");
                        printf("Lower the number of cores or increase "
                                "the number of crypto devices\n");
                        return -EINVAL;
                }
                struct rte_cryptodev_config conf = {
                        .nb_queue_pairs = opts->nb_qps,
                        .socket_id = socket_id
                };

                struct rte_cryptodev_qp_conf qp_conf = {
                        .nb_descriptors = opts->nb_descriptors
                };

                /*
                 * Device info specifies the min headroom and tailroom
                 * requirements for the crypto PMD. These need to be
                 * honoured by the application when creating mbufs.
                 */
                if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
                        /* Update headroom */
                        opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
                }
                if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
                        /* Update tailroom */
                        opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
                }

                /* Update segment size to include headroom & tailroom */
                opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

                uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
                /*
                 * Two session objects are required for each session
                 * (one for the header, one for the private data).
                 */
                if (!strcmp((const char *)opts->device_type,
                                        "crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                        uint32_t nb_slaves =
                                rte_cryptodev_scheduler_slaves_get(cdev_id,
                                                                NULL);

                        sessions_needed = 2 * enabled_cdev_count *
                                opts->nb_qps * nb_slaves;
#endif
                } else
                        sessions_needed = 2 * enabled_cdev_count *
                                                opts->nb_qps;

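                /*
                 * Worked example: with 2 enabled devices, 2 queue pairs
                 * each and no scheduler, 2 * 2 * 2 = 8 session objects
                 * are reserved in the pool.
                 */
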
                /*
                 * A single session is required per queue pair
                 * in each device.
                 */
                if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
                        RTE_LOG(ERR, USER1,
                                "Device does not support at least "
                                "%u sessions\n", opts->nb_qps);
                        return -ENOTSUP;
                }
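                /*
                 * One session mempool is created per NUMA socket and
                 * shared by all devices on that socket; elements are
                 * sized for the largest PMD private session.
                 */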
                if (session_pool_socket[socket_id] == NULL) {
                        char mp_name[RTE_MEMPOOL_NAMESIZE];
                        struct rte_mempool *sess_mp;

                        snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                                "sess_mp_%u", socket_id);
                        sess_mp = rte_mempool_create(mp_name,
                                                sessions_needed,
                                                max_sess_size,
                                                SESS_MEMPOOL_CACHE_SIZE,
                                                0, NULL, NULL, NULL,
                                                NULL, socket_id,
                                                0);

                        if (sess_mp == NULL) {
                                printf("Cannot create session pool on socket %d\n",
                                        socket_id);
                                return -ENOMEM;
                        }

                        printf("Allocated session pool on socket %d\n", socket_id);
                        session_pool_socket[socket_id] = sess_mp;
                }

                ret = rte_cryptodev_configure(cdev_id, &conf);
                if (ret < 0) {
                        printf("Failed to configure cryptodev %u\n", cdev_id);
                        return -EINVAL;
                }

                for (j = 0; j < opts->nb_qps; j++) {
                        ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
                                &qp_conf, socket_id,
                                session_pool_socket[socket_id]);
                        if (ret < 0) {
                                printf("Failed to setup queue pair %u on "
                                        "cryptodev %u\n", j, cdev_id);
                                return -EINVAL;
                        }
                }

                ret = rte_cryptodev_start(cdev_id);
                if (ret < 0) {
                        printf("Failed to start device %u: error %d\n",
                                        cdev_id, ret);
                        return -EPERM;
                }
        }

        return enabled_cdev_count;
}

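/*
 * Check that every enabled device supports the requested algorithms
 * together with the configured key, IV, digest and AAD sizes.
 */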
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
                uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
        struct rte_cryptodev_sym_capability_idx cap_idx;
        const struct rte_cryptodev_symmetric_capability *capability;

        uint8_t i, cdev_id;
        int ret;

        for (i = 0; i < nb_cryptodevs; i++) {

                cdev_id = enabled_cdevs[i];

                if (opts->op_type == CPERF_AUTH_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                        cap_idx.algo.auth = opts->auth_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_auth(
                                        capability,
                                        opts->auth_key_sz,
                                        opts->digest_sz,
                                        opts->auth_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_CIPHER_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        cap_idx.algo.cipher = opts->cipher_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_cipher(
                                        capability,
                                        opts->cipher_key_sz,
                                        opts->cipher_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_AEAD) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
                        cap_idx.algo.aead = opts->aead_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_aead(
                                        capability,
                                        opts->aead_key_sz,
                                        opts->digest_sz,
                                        opts->aead_aad_sz,
                                        opts->aead_iv_sz);
                        if (ret != 0)
                                return ret;
                }
        }

        return 0;
}

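/*
 * Validate a user-supplied test vector against the options: every field
 * needed by the chosen op type must be present and match the configured
 * sizes.
 */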
static int
cperf_check_test_vector(struct cperf_options *opts,
                struct cperf_test_vector *test_vec)
{
        if (opts->op_type == CPERF_CIPHER_ONLY) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AUTH_ONLY) {
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }

        } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                        opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AEAD) {
                if (test_vec->plaintext.data == NULL)
                        return -1;
                if (test_vec->plaintext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->ciphertext.data == NULL)
                        return -1;
                if (test_vec->ciphertext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->aead_iv.data == NULL)
                        return -1;
                if (test_vec->aead_iv.length != opts->aead_iv_sz)
                        return -1;
                if (test_vec->aad.data == NULL)
                        return -1;
                if (test_vec->aad.length != opts->aead_aad_sz)
                        return -1;
                if (test_vec->digest.data == NULL)
                        return -1;
                if (test_vec->digest.length < opts->digest_sz)
                        return -1;
        }
        return 0;
}

int
main(int argc, char **argv)
{
        struct cperf_options opts = {0};
        struct cperf_test_vector *t_vec = NULL;
        struct cperf_op_fns op_fns;

        void *ctx[RTE_MAX_LCORE] = { };
        struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

        int nb_cryptodevs = 0;
        uint16_t total_nb_qps = 0;
        uint8_t cdev_id, i;
        uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

        uint8_t buffer_size_idx = 0;

        int ret;
        uint32_t lcore_id;

        /* Initialise DPDK EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
        argc -= ret;
        argv += ret;

        cperf_options_default(&opts);

        ret = cperf_options_parse(&opts, argc, argv);
        if (ret) {
                RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
                goto err;
        }

        ret = cperf_options_check(&opts);
        if (ret) {
                RTE_LOG(ERR, USER1,
                                "Checking one or more user options failed\n");
                goto err;
        }

        nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
                        session_pool_socket);

        if (!opts.silent)
                cperf_options_dump(&opts);

        if (nb_cryptodevs < 1) {
                RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
                                "device type\n");
                nb_cryptodevs = 0;
                goto err;
        }

        ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
                        nb_cryptodevs);
        if (ret) {
                RTE_LOG(ERR, USER1, "Crypto device type does not support "
                                "the requested capabilities\n");
                goto err;
        }

        if (opts.test_file != NULL) {
                t_vec = cperf_test_vector_get_from_file(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector from"
                                        " the specified file\n");
                        goto err;
                }

                if (cperf_check_test_vector(&opts, t_vec)) {
                        RTE_LOG(ERR, USER1, "Test vector file is missing "
                                        "required fields\n");
                        goto err;
                }
        } else {
                t_vec = cperf_test_vector_get_dummy(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified algorithms\n");
                        goto err;
                }
        }

        ret = cperf_get_op_functions(&opts, &op_fns);
        if (ret) {
                RTE_LOG(ERR, USER1, "Failed to find function ops set for "
                                "specified algorithms combination\n");
                goto err;
        }

        if (!opts.silent)
                show_test_vector(t_vec);

        total_nb_qps = nb_cryptodevs * opts.nb_qps;

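        /*
         * Assign one test context per worker lcore, walking the
         * (device, queue pair) combinations round-robin: qp_id cycles
         * through a device's queue pairs and cdev_index advances once
         * it wraps to zero.
         */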
        i = 0;
        uint8_t qp_id = 0, cdev_index = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cdev_id = enabled_cdevs[cdev_index];

                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                ctx[i] = cperf_testmap[opts.test].constructor(
                                session_pool_socket[socket_id], cdev_id, qp_id,
                                &opts, t_vec, &op_fns);
                if (ctx[i] == NULL) {
                        RTE_LOG(ERR, USER1, "Test run constructor failed\n");
                        goto err;
                }
                qp_id = (qp_id + 1) % opts.nb_qps;
                if (qp_id == 0)
                        cdev_index++;
                i++;
        }

        if (opts.imix_distribution_count != 0) {
                uint8_t buffer_size_count = opts.buffer_size_count;
                uint16_t distribution_total[buffer_size_count];
                uint32_t op_idx;
                uint32_t test_average_size = 0;
                const uint32_t *buffer_size_list = opts.buffer_size_list;
                const uint32_t *imix_distribution_list = opts.imix_distribution_list;

                opts.imix_buffer_sizes = rte_malloc(NULL,
                                        sizeof(uint32_t) * opts.pool_sz,
                                        0);
                if (opts.imix_buffer_sizes == NULL) {
                        RTE_LOG(ERR, USER1,
                                "Failed to allocate IMIX buffer size array\n");
                        goto err;
                }
                /*
                 * Calculate accumulated distribution of
                 * probabilities per packet size
                 */
                distribution_total[0] = imix_distribution_list[0];
                for (i = 1; i < buffer_size_count; i++)
                        distribution_total[i] = imix_distribution_list[i] +
                                distribution_total[i-1];

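                /*
                 * Example: weights {40, 60} over sizes {64, 1024} give
                 * cumulative totals {40, 100}; a random draw in [0, 100)
                 * below 40 then selects 64, anything else selects 1024.
                 */
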
                /* Calculate a random sequence of packet sizes, based on the distribution */
                for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
                        uint16_t random_number = rte_rand() %
                                distribution_total[buffer_size_count - 1];
                        for (i = 0; i < buffer_size_count; i++)
                                if (random_number < distribution_total[i])
                                        break;

                        opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
                }

                /* Calculate average buffer size for the IMIX distribution */
                for (i = 0; i < buffer_size_count; i++)
                        test_average_size += buffer_size_list[i] *
                                imix_distribution_list[i];

                opts.test_buffer_size = test_average_size /
                                distribution_total[buffer_size_count - 1];

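                /*
                 * test_buffer_size is thus the weighted mean of the size
                 * list: with the example above, (64 * 40 + 1024 * 60) / 100
                 * = 640 bytes.
                 */
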
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;

                        rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                ctx[i], lcore_id);
                        i++;
                }
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;
                        rte_eal_wait_lcore(lcore_id);
                        i++;
                }
        } else {

                /* Get the first size from the range or list */
                if (opts.inc_buffer_size != 0)
                        opts.test_buffer_size = opts.min_buffer_size;
                else
                        opts.test_buffer_size = opts.buffer_size_list[0];

                while (opts.test_buffer_size <= opts.max_buffer_size) {
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;

                                rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                        ctx[i], lcore_id);
                                i++;
                        }
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;
                                rte_eal_wait_lcore(lcore_id);
                                i++;
                        }

                        /* Get the next size from the range or list */
                        if (opts.inc_buffer_size != 0)
                                opts.test_buffer_size += opts.inc_buffer_size;
                        else {
                                if (++buffer_size_idx == opts.buffer_size_count)
                                        break;
                                opts.test_buffer_size =
                                        opts.buffer_size_list[buffer_size_idx];
                        }
                }
        }

        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);

        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_SUCCESS;

err:
        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (i == total_nb_qps)
                        break;

                if (ctx[i] && cperf_testmap[opts.test].destructor)
                        cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);
        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_FAILURE;
}