cryptodev: change queue pair configure structure
dpdk.git: app/test-crypto-perf/main.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"


const char *cperf_test_type_strs[] = {
        [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
        [CPERF_TEST_TYPE_LATENCY] = "latency",
        [CPERF_TEST_TYPE_VERIFY] = "verify",
        [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
        [CPERF_CIPHER_ONLY] = "cipher-only",
        [CPERF_AUTH_ONLY] = "auth-only",
        [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
        [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
        [CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
                [CPERF_TEST_TYPE_THROUGHPUT] = {
                                cperf_throughput_test_constructor,
                                cperf_throughput_test_runner,
                                cperf_throughput_test_destructor
                },
                [CPERF_TEST_TYPE_LATENCY] = {
                                cperf_latency_test_constructor,
                                cperf_latency_test_runner,
                                cperf_latency_test_destructor
                },
                [CPERF_TEST_TYPE_VERIFY] = {
                                cperf_verify_test_constructor,
                                cperf_verify_test_runner,
                                cperf_verify_test_destructor
                },
                [CPERF_TEST_TYPE_PMDCC] = {
                                cperf_pmd_cyclecount_test_constructor,
                                cperf_pmd_cyclecount_test_runner,
                                cperf_pmd_cyclecount_test_destructor
                }
};

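/*
 * Discover the crypto devices of the requested type, size a shared
 * per-socket session mempool, then configure, set up the queue pairs
 * of, and start each enabled device. Returns the number of enabled
 * devices on success, or a negative errno value on failure.
 */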
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
                        struct rte_mempool *session_pool_socket[])
{
        uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
        uint32_t sessions_needed = 0;
        unsigned int i, j;
        int ret;

        enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
                        enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
        if (enabled_cdev_count == 0) {
                printf("No crypto devices type %s available\n",
                                opts->device_type);
                return -EINVAL;
        }

        nb_lcores = rte_lcore_count() - 1;

        if (nb_lcores < 1) {
                RTE_LOG(ERR, USER1,
                        "Number of enabled cores needs to be higher than 1\n");
                return -EINVAL;
        }

        /*
         * Use fewer devices,
         * if there are more available than cores.
         */
        if (enabled_cdev_count > nb_lcores)
                enabled_cdev_count = nb_lcores;

        /* Create a mempool shared by all the devices */
        uint32_t max_sess_size = 0, sess_size;

        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
                if (sess_size > max_sess_size)
                        max_sess_size = sess_size;
        }

        /*
         * Calculate the number of queue pairs needed, based on the
         * number of available logical cores and crypto devices.
         * For instance, if there are 4 cores and 2 crypto devices,
         * 2 queue pairs will be set up per device.
         */
        opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
                                (nb_lcores / enabled_cdev_count) + 1 :
                                nb_lcores / enabled_cdev_count;

        for (i = 0; i < enabled_cdev_count &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                /*
                 * If the multi-core scheduler is used, limit the number
                 * of queue pairs to 1, as there is no way to know
                 * how many cores are being used by the PMD, and
                 * how many will be available for the application.
                 */
                if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
                                rte_cryptodev_scheduler_mode_get(cdev_id) ==
                                CDEV_SCHED_MODE_MULTICORE)
                        opts->nb_qps = 1;
#endif

                struct rte_cryptodev_info cdev_info;
                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                rte_cryptodev_info_get(cdev_id, &cdev_info);
                if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
                        printf("Number of needed queue pairs is higher "
                                "than the maximum number of queue pairs "
                                "per device.\n");
                        printf("Lower the number of cores or increase "
                                "the number of crypto devices\n");
                        return -EINVAL;
                }
                struct rte_cryptodev_config conf = {
                        .nb_queue_pairs = opts->nb_qps,
                        .socket_id = socket_id
                };

                struct rte_cryptodev_qp_conf qp_conf = {
                        .nb_descriptors = opts->nb_descriptors
                };

                /**
                 * Device info specifies the min headroom and tailroom
                 * requirements for the crypto PMD. These need to be
                 * honoured by the application when creating mbufs.
                 */
                if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
                        /* Update headroom */
                        opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
                }
                if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
                        /* Update tailroom */
                        opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
                }

                /* Update segment size to include headroom & tailroom */
                opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

                uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
                /*
                 * Two session objects are required for each session
                 * (one for the header, one for the private data)
                 */
                if (!strcmp((const char *)opts->device_type,
                                        "crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                        uint32_t nb_slaves =
                                rte_cryptodev_scheduler_slaves_get(cdev_id,
                                                                NULL);

                        sessions_needed = 2 * enabled_cdev_count *
                                opts->nb_qps * nb_slaves;
#endif
                } else
                        sessions_needed = 2 * enabled_cdev_count *
                                                opts->nb_qps;

                /*
                 * A single session is required per queue pair
                 * in each device
                 */
                if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
                        RTE_LOG(ERR, USER1,
                                "Device does not support at least "
                                "%u sessions\n", opts->nb_qps);
                        return -ENOTSUP;
                }
                if (session_pool_socket[socket_id] == NULL) {
                        char mp_name[RTE_MEMPOOL_NAMESIZE];
                        struct rte_mempool *sess_mp;

                        snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                                "sess_mp_%u", socket_id);
                        sess_mp = rte_mempool_create(mp_name,
                                                sessions_needed,
                                                max_sess_size,
                                                0,
                                                0, NULL, NULL, NULL,
                                                NULL, socket_id,
                                                0);

                        if (sess_mp == NULL) {
                                printf("Cannot create session pool on socket %d\n",
                                        socket_id);
                                return -ENOMEM;
                        }

                        printf("Allocated session pool on socket %d\n", socket_id);
                        session_pool_socket[socket_id] = sess_mp;
                }

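                /*
                 * With the changed queue pair configure structure, the
                 * session mempool is passed per queue pair through
                 * rte_cryptodev_qp_conf instead of at device configure
                 * time; here the same per-socket pool backs both the
                 * session headers and the PMD private session data.
                 */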
                qp_conf.mp_session = session_pool_socket[socket_id];
                qp_conf.mp_session_private = session_pool_socket[socket_id];

                ret = rte_cryptodev_configure(cdev_id, &conf);
                if (ret < 0) {
                        printf("Failed to configure cryptodev %u\n", cdev_id);
                        return -EINVAL;
                }

                for (j = 0; j < opts->nb_qps; j++) {
                        ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
                                &qp_conf, socket_id);
                        if (ret < 0) {
                                printf("Failed to setup queue pair %u on "
                                        "cryptodev %u\n", j, cdev_id);
                                return -EINVAL;
                        }
                }

                ret = rte_cryptodev_start(cdev_id);
                if (ret < 0) {
                        printf("Failed to start device %u: error %d\n",
                                        cdev_id, ret);
                        return -EPERM;
                }
        }

        return enabled_cdev_count;
}

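/*
 * Check that every enabled device advertises support for the cipher,
 * auth and/or AEAD algorithm requested in the options, including the
 * key, digest, AAD and IV sizes.
 */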
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
                uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
        struct rte_cryptodev_sym_capability_idx cap_idx;
        const struct rte_cryptodev_symmetric_capability *capability;

        uint8_t i, cdev_id;
        int ret;

        for (i = 0; i < nb_cryptodevs; i++) {

                cdev_id = enabled_cdevs[i];

                if (opts->op_type == CPERF_AUTH_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                        cap_idx.algo.auth = opts->auth_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_auth(
                                        capability,
                                        opts->auth_key_sz,
                                        opts->digest_sz,
                                        opts->auth_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_CIPHER_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        cap_idx.algo.cipher = opts->cipher_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_cipher(
                                        capability,
                                        opts->cipher_key_sz,
                                        opts->cipher_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_AEAD) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
                        cap_idx.algo.aead = opts->aead_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_aead(
                                        capability,
                                        opts->aead_key_sz,
                                        opts->digest_sz,
                                        opts->aead_aad_sz,
                                        opts->aead_iv_sz);
                        if (ret != 0)
                                return ret;
                }
        }

        return 0;
}

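/*
 * Validate a user-supplied test vector against the options: for each
 * selected operation type, the required data, keys, IVs, AAD and
 * digest must be present and of the expected lengths.
 */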
static int
cperf_check_test_vector(struct cperf_options *opts,
                struct cperf_test_vector *test_vec)
{
        if (opts->op_type == CPERF_CIPHER_ONLY) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        /* Cipher IV is only required for some algorithms */
                        if (opts->cipher_iv_sz &&
                                        test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AUTH_ONLY) {
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        /* Auth key is only required for some algorithms */
                        if (opts->auth_key_sz &&
                                        test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }

        } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                        opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AEAD) {
                if (test_vec->plaintext.data == NULL)
                        return -1;
                if (test_vec->plaintext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->ciphertext.data == NULL)
                        return -1;
                if (test_vec->ciphertext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->aead_key.data == NULL)
                        return -1;
                if (test_vec->aead_key.length != opts->aead_key_sz)
                        return -1;
                if (test_vec->aead_iv.data == NULL)
                        return -1;
                if (test_vec->aead_iv.length != opts->aead_iv_sz)
                        return -1;
                if (test_vec->aad.data == NULL)
                        return -1;
                if (test_vec->aad.length != opts->aead_aad_sz)
                        return -1;
                if (test_vec->digest.data == NULL)
                        return -1;
                if (test_vec->digest.length < opts->digest_sz)
                        return -1;
        }
        return 0;
}

int
main(int argc, char **argv)
{
        struct cperf_options opts = {0};
        struct cperf_test_vector *t_vec = NULL;
        struct cperf_op_fns op_fns;

        void *ctx[RTE_MAX_LCORE] = { };
        struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

        int nb_cryptodevs = 0;
        uint16_t total_nb_qps = 0;
        uint8_t cdev_id, i;
        uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

        uint8_t buffer_size_idx = 0;

        int ret;
        uint32_t lcore_id;

        /* Initialise DPDK EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
        argc -= ret;
        argv += ret;

        cperf_options_default(&opts);

        ret = cperf_options_parse(&opts, argc, argv);
        if (ret) {
                RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
                goto err;
        }

        ret = cperf_options_check(&opts);
        if (ret) {
                RTE_LOG(ERR, USER1,
                                "Checking one or more user options failed\n");
                goto err;
        }

        nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
                        session_pool_socket);

        if (!opts.silent)
                cperf_options_dump(&opts);

        if (nb_cryptodevs < 1) {
                RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
                                "device type\n");
                nb_cryptodevs = 0;
                goto err;
        }

        ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
                        nb_cryptodevs);
        if (ret) {
                RTE_LOG(ERR, USER1, "Crypto device type does not support "
                                "capabilities requested\n");
                goto err;
        }

        if (opts.test_file != NULL) {
                t_vec = cperf_test_vector_get_from_file(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified file\n");
                        goto err;
                }

                if (cperf_check_test_vector(&opts, t_vec)) {
                        RTE_LOG(ERR, USER1,
                                        "Necessary test vectors are incomplete\n");
                        goto err;
                }
        } else {
                t_vec = cperf_test_vector_get_dummy(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified algorithms\n");
                        goto err;
                }
        }

        ret = cperf_get_op_functions(&opts, &op_fns);
        if (ret) {
                RTE_LOG(ERR, USER1, "Failed to find function ops set for "
                                "specified algorithms combination\n");
                goto err;
        }

        if (!opts.silent)
                show_test_vector(t_vec);

        total_nb_qps = nb_cryptodevs * opts.nb_qps;

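        /*
         * Build one test context per queue pair, assigning queue pairs
         * to worker lcores round-robin: qp_id wraps around opts.nb_qps
         * and the device index advances on each wrap.
         */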
        i = 0;
        uint8_t qp_id = 0, cdev_index = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cdev_id = enabled_cdevs[cdev_index];

                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                ctx[i] = cperf_testmap[opts.test].constructor(
                                session_pool_socket[socket_id], cdev_id, qp_id,
                                &opts, t_vec, &op_fns);
                if (ctx[i] == NULL) {
                        RTE_LOG(ERR, USER1, "Test run constructor failed\n");
                        goto err;
                }
                qp_id = (qp_id + 1) % opts.nb_qps;
                if (qp_id == 0)
                        cdev_index++;
                i++;
        }

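        /*
         * IMIX mode: pre-compute a random sequence of buffer sizes that
         * follows the user-supplied size distribution, so each operation
         * in the pool draws its size from that sequence.
         */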
        if (opts.imix_distribution_count != 0) {
                uint8_t buffer_size_count = opts.buffer_size_count;
                uint16_t distribution_total[buffer_size_count];
                uint32_t op_idx;
                uint32_t test_average_size = 0;
                const uint32_t *buffer_size_list = opts.buffer_size_list;
                const uint32_t *imix_distribution_list = opts.imix_distribution_list;

                opts.imix_buffer_sizes = rte_malloc(NULL,
                                        sizeof(uint32_t) * opts.pool_sz,
                                        0);
                /*
                 * Calculate accumulated distribution of
                 * probabilities per packet size
                 */
                distribution_total[0] = imix_distribution_list[0];
                for (i = 1; i < buffer_size_count; i++)
                        distribution_total[i] = imix_distribution_list[i] +
                                distribution_total[i-1];

                /* Calculate a random sequence of packet sizes, based on distribution */
                for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
                        uint16_t random_number = rte_rand() %
                                distribution_total[buffer_size_count - 1];
                        for (i = 0; i < buffer_size_count; i++)
                                if (random_number < distribution_total[i])
                                        break;

                        opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
                }

                /* Calculate average buffer size for the IMIX distribution */
                for (i = 0; i < buffer_size_count; i++)
                        test_average_size += buffer_size_list[i] *
                                imix_distribution_list[i];

                opts.test_buffer_size = test_average_size /
                                distribution_total[buffer_size_count - 1];

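                /* Launch one runner per queue pair, then wait for them all */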
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;

                        rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                ctx[i], lcore_id);
                        i++;
                }
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;
                        rte_eal_wait_lcore(lcore_id);
                        i++;
                }
        } else {

                /* Get next size from range or list */
                if (opts.inc_buffer_size != 0)
                        opts.test_buffer_size = opts.min_buffer_size;
                else
                        opts.test_buffer_size = opts.buffer_size_list[0];

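                /*
                 * Run the full set of runners once per buffer size, either
                 * stepping through the min/max/increment range or walking
                 * the explicit size list.
                 */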
                while (opts.test_buffer_size <= opts.max_buffer_size) {
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;

                                rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                        ctx[i], lcore_id);
                                i++;
                        }
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;
                                rte_eal_wait_lcore(lcore_id);
                                i++;
                        }

                        /* Get next size from range or list */
                        if (opts.inc_buffer_size != 0)
                                opts.test_buffer_size += opts.inc_buffer_size;
                        else {
                                if (++buffer_size_idx == opts.buffer_size_count)
                                        break;
                                opts.test_buffer_size =
                                        opts.buffer_size_list[buffer_size_idx];
                        }
                }
        }

        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);

        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_SUCCESS;

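        /*
         * Error path: tear down any contexts that were constructed,
         * stop the devices that were started and release the remaining
         * allocations.
         */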
err:
        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (i == total_nb_qps)
                        break;

                if (ctx[i] && cperf_testmap[opts.test].destructor)
                        cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);
        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_FAILURE;
}