app/testpmd: support RSS config in flow query
[dpdk.git] / app / test-crypto-perf / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <unistd.h>
7
8 #include <rte_malloc.h>
9 #include <rte_random.h>
10 #include <rte_eal.h>
11 #include <rte_cryptodev.h>
12 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
13 #include <rte_cryptodev_scheduler.h>
14 #endif
15
16 #include "cperf.h"
17 #include "cperf_options.h"
18 #include "cperf_test_vector_parsing.h"
19 #include "cperf_test_throughput.h"
20 #include "cperf_test_latency.h"
21 #include "cperf_test_verify.h"
22 #include "cperf_test_pmd_cyclecount.h"
23
/*
 * Per-NUMA-socket session mempools, shared by all crypto devices on
 * that socket: sess_mp holds symmetric session headers, priv_mp holds
 * driver-private session data (sized for the largest PMD requirement).
 * Lazily created by fill_session_pool_socket().
 */
static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];
28
/* Human-readable names for each test type, indexed by enum cperf_test_type
 * (used for option parsing/printing).
 */
const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};
35
/* Human-readable names for each crypto operation type, indexed by
 * enum cperf_op_type (used for option parsing/printing).
 */
const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis"
};
45
46 const struct cperf_test cperf_testmap[] = {
47                 [CPERF_TEST_TYPE_THROUGHPUT] = {
48                                 cperf_throughput_test_constructor,
49                                 cperf_throughput_test_runner,
50                                 cperf_throughput_test_destructor
51                 },
52                 [CPERF_TEST_TYPE_LATENCY] = {
53                                 cperf_latency_test_constructor,
54                                 cperf_latency_test_runner,
55                                 cperf_latency_test_destructor
56                 },
57                 [CPERF_TEST_TYPE_VERIFY] = {
58                                 cperf_verify_test_constructor,
59                                 cperf_verify_test_runner,
60                                 cperf_verify_test_destructor
61                 },
62                 [CPERF_TEST_TYPE_PMDCC] = {
63                                 cperf_pmd_cyclecount_test_constructor,
64                                 cperf_pmd_cyclecount_test_runner,
65                                 cperf_pmd_cyclecount_test_destructor
66                 }
67 };
68
69 static int
70 fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
71                 uint32_t nb_sessions)
72 {
73         char mp_name[RTE_MEMPOOL_NAMESIZE];
74         struct rte_mempool *sess_mp;
75
76         if (session_pool_socket[socket_id].priv_mp == NULL) {
77                 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
78                         "priv_sess_mp_%u", socket_id);
79
80                 sess_mp = rte_mempool_create(mp_name,
81                                         nb_sessions,
82                                         session_priv_size,
83                                         0, 0, NULL, NULL, NULL,
84                                         NULL, socket_id,
85                                         0);
86
87                 if (sess_mp == NULL) {
88                         printf("Cannot create pool \"%s\" on socket %d\n",
89                                 mp_name, socket_id);
90                         return -ENOMEM;
91                 }
92
93                 printf("Allocated pool \"%s\" on socket %d\n",
94                         mp_name, socket_id);
95                 session_pool_socket[socket_id].priv_mp = sess_mp;
96         }
97
98         if (session_pool_socket[socket_id].sess_mp == NULL) {
99
100                 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
101                         "sess_mp_%u", socket_id);
102
103                 sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
104                                         nb_sessions, 0, 0, 0, socket_id);
105
106                 if (sess_mp == NULL) {
107                         printf("Cannot create pool \"%s\" on socket %d\n",
108                                 mp_name, socket_id);
109                         return -ENOMEM;
110                 }
111
112                 printf("Allocated pool \"%s\" on socket %d\n",
113                         mp_name, socket_id);
114                 session_pool_socket[socket_id].sess_mp = sess_mp;
115         }
116
117         return 0;
118 }
119
120 static int
121 cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
122 {
123         uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
124         uint32_t sessions_needed = 0;
125         unsigned int i, j;
126         int ret;
127
128         enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
129                         enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
130         if (enabled_cdev_count == 0) {
131                 printf("No crypto devices type %s available\n",
132                                 opts->device_type);
133                 return -EINVAL;
134         }
135
136         nb_lcores = rte_lcore_count() - 1;
137
138         if (nb_lcores < 1) {
139                 RTE_LOG(ERR, USER1,
140                         "Number of enabled cores need to be higher than 1\n");
141                 return -EINVAL;
142         }
143
144         /*
145          * Use less number of devices,
146          * if there are more available than cores.
147          */
148         if (enabled_cdev_count > nb_lcores)
149                 enabled_cdev_count = nb_lcores;
150
151         /* Create a mempool shared by all the devices */
152         uint32_t max_sess_size = 0, sess_size;
153
154         for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
155                 sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
156                 if (sess_size > max_sess_size)
157                         max_sess_size = sess_size;
158         }
159
160         /*
161          * Calculate number of needed queue pairs, based on the amount
162          * of available number of logical cores and crypto devices.
163          * For instance, if there are 4 cores and 2 crypto devices,
164          * 2 queue pairs will be set up per device.
165          */
166         opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
167                                 (nb_lcores / enabled_cdev_count) + 1 :
168                                 nb_lcores / enabled_cdev_count;
169
170         for (i = 0; i < enabled_cdev_count &&
171                         i < RTE_CRYPTO_MAX_DEVS; i++) {
172                 cdev_id = enabled_cdevs[i];
173 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
174                 /*
175                  * If multi-core scheduler is used, limit the number
176                  * of queue pairs to 1, as there is no way to know
177                  * how many cores are being used by the PMD, and
178                  * how many will be available for the application.
179                  */
180                 if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
181                                 rte_cryptodev_scheduler_mode_get(cdev_id) ==
182                                 CDEV_SCHED_MODE_MULTICORE)
183                         opts->nb_qps = 1;
184 #endif
185
186                 struct rte_cryptodev_info cdev_info;
187                 uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
188                 /* range check the socket_id - negative values become big
189                  * positive ones due to use of unsigned value
190                  */
191                 if (socket_id >= RTE_MAX_NUMA_NODES)
192                         socket_id = 0;
193
194                 rte_cryptodev_info_get(cdev_id, &cdev_info);
195                 if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
196                         printf("Number of needed queue pairs is higher "
197                                 "than the maximum number of queue pairs "
198                                 "per device.\n");
199                         printf("Lower the number of cores or increase "
200                                 "the number of crypto devices\n");
201                         return -EINVAL;
202                 }
203                 struct rte_cryptodev_config conf = {
204                         .nb_queue_pairs = opts->nb_qps,
205                         .socket_id = socket_id,
206                         .ff_disable = RTE_CRYPTODEV_FF_SECURITY |
207                                       RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO,
208                 };
209
210                 struct rte_cryptodev_qp_conf qp_conf = {
211                         .nb_descriptors = opts->nb_descriptors
212                 };
213
214                 /**
215                  * Device info specifies the min headroom and tailroom
216                  * requirement for the crypto PMD. This need to be honoured
217                  * by the application, while creating mbuf.
218                  */
219                 if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
220                         /* Update headroom */
221                         opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
222                 }
223                 if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
224                         /* Update tailroom */
225                         opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
226                 }
227
228                 /* Update segment size to include headroom & tailroom */
229                 opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);
230
231                 uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
232                 /*
233                  * Two sessions objects are required for each session
234                  * (one for the header, one for the private data)
235                  */
236                 if (!strcmp((const char *)opts->device_type,
237                                         "crypto_scheduler")) {
238 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
239                         uint32_t nb_slaves =
240                                 rte_cryptodev_scheduler_slaves_get(cdev_id,
241                                                                 NULL);
242
243                         sessions_needed = enabled_cdev_count *
244                                 opts->nb_qps * nb_slaves;
245 #endif
246                 } else
247                         sessions_needed = enabled_cdev_count *
248                                                 opts->nb_qps * 2;
249
250                 /*
251                  * A single session is required per queue pair
252                  * in each device
253                  */
254                 if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
255                         RTE_LOG(ERR, USER1,
256                                 "Device does not support at least "
257                                 "%u sessions\n", opts->nb_qps);
258                         return -ENOTSUP;
259                 }
260
261                 ret = fill_session_pool_socket(socket_id, max_sess_size,
262                                 sessions_needed);
263                 if (ret < 0)
264                         return ret;
265
266                 qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
267                 qp_conf.mp_session_private =
268                                 session_pool_socket[socket_id].priv_mp;
269
270                 ret = rte_cryptodev_configure(cdev_id, &conf);
271                 if (ret < 0) {
272                         printf("Failed to configure cryptodev %u", cdev_id);
273                         return -EINVAL;
274                 }
275
276                 for (j = 0; j < opts->nb_qps; j++) {
277                         ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
278                                 &qp_conf, socket_id);
279                         if (ret < 0) {
280                                 printf("Failed to setup queue pair %u on "
281                                         "cryptodev %u", j, cdev_id);
282                                 return -EINVAL;
283                         }
284                 }
285
286                 ret = rte_cryptodev_start(cdev_id);
287                 if (ret < 0) {
288                         printf("Failed to start device %u: error %d\n",
289                                         cdev_id, ret);
290                         return -EPERM;
291                 }
292         }
293
294         return enabled_cdev_count;
295 }
296
297 static int
298 cperf_verify_devices_capabilities(struct cperf_options *opts,
299                 uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
300 {
301         struct rte_cryptodev_sym_capability_idx cap_idx;
302         const struct rte_cryptodev_symmetric_capability *capability;
303
304         uint8_t i, cdev_id;
305         int ret;
306
307         for (i = 0; i < nb_cryptodevs; i++) {
308
309                 cdev_id = enabled_cdevs[i];
310
311                 if (opts->op_type == CPERF_AUTH_ONLY ||
312                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
313                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
314
315                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
316                         cap_idx.algo.auth = opts->auth_algo;
317
318                         capability = rte_cryptodev_sym_capability_get(cdev_id,
319                                         &cap_idx);
320                         if (capability == NULL)
321                                 return -1;
322
323                         ret = rte_cryptodev_sym_capability_check_auth(
324                                         capability,
325                                         opts->auth_key_sz,
326                                         opts->digest_sz,
327                                         opts->auth_iv_sz);
328                         if (ret != 0)
329                                 return ret;
330                 }
331
332                 if (opts->op_type == CPERF_CIPHER_ONLY ||
333                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
334                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
335
336                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
337                         cap_idx.algo.cipher = opts->cipher_algo;
338
339                         capability = rte_cryptodev_sym_capability_get(cdev_id,
340                                         &cap_idx);
341                         if (capability == NULL)
342                                 return -1;
343
344                         ret = rte_cryptodev_sym_capability_check_cipher(
345                                         capability,
346                                         opts->cipher_key_sz,
347                                         opts->cipher_iv_sz);
348                         if (ret != 0)
349                                 return ret;
350                 }
351
352                 if (opts->op_type == CPERF_AEAD) {
353
354                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
355                         cap_idx.algo.aead = opts->aead_algo;
356
357                         capability = rte_cryptodev_sym_capability_get(cdev_id,
358                                         &cap_idx);
359                         if (capability == NULL)
360                                 return -1;
361
362                         ret = rte_cryptodev_sym_capability_check_aead(
363                                         capability,
364                                         opts->aead_key_sz,
365                                         opts->digest_sz,
366                                         opts->aead_aad_sz,
367                                         opts->aead_iv_sz);
368                         if (ret != 0)
369                                 return ret;
370                 }
371         }
372
373         return 0;
374 }
375
376 static int
377 cperf_check_test_vector(struct cperf_options *opts,
378                 struct cperf_test_vector *test_vec)
379 {
380         if (opts->op_type == CPERF_CIPHER_ONLY) {
381                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
382                         if (test_vec->plaintext.data == NULL)
383                                 return -1;
384                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
385                         if (test_vec->plaintext.data == NULL)
386                                 return -1;
387                         if (test_vec->plaintext.length < opts->max_buffer_size)
388                                 return -1;
389                         if (test_vec->ciphertext.data == NULL)
390                                 return -1;
391                         if (test_vec->ciphertext.length < opts->max_buffer_size)
392                                 return -1;
393                         /* Cipher IV is only required for some algorithms */
394                         if (opts->cipher_iv_sz &&
395                                         test_vec->cipher_iv.data == NULL)
396                                 return -1;
397                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
398                                 return -1;
399                         if (test_vec->cipher_key.data == NULL)
400                                 return -1;
401                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
402                                 return -1;
403                 }
404         } else if (opts->op_type == CPERF_AUTH_ONLY) {
405                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
406                         if (test_vec->plaintext.data == NULL)
407                                 return -1;
408                         if (test_vec->plaintext.length < opts->max_buffer_size)
409                                 return -1;
410                         /* Auth key is only required for some algorithms */
411                         if (opts->auth_key_sz &&
412                                         test_vec->auth_key.data == NULL)
413                                 return -1;
414                         if (test_vec->auth_key.length != opts->auth_key_sz)
415                                 return -1;
416                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
417                                 return -1;
418                         /* Auth IV is only required for some algorithms */
419                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
420                                 return -1;
421                         if (test_vec->digest.data == NULL)
422                                 return -1;
423                         if (test_vec->digest.length < opts->digest_sz)
424                                 return -1;
425                 }
426
427         } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
428                         opts->op_type == CPERF_AUTH_THEN_CIPHER) {
429                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
430                         if (test_vec->plaintext.data == NULL)
431                                 return -1;
432                         if (test_vec->plaintext.length < opts->max_buffer_size)
433                                 return -1;
434                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
435                         if (test_vec->plaintext.data == NULL)
436                                 return -1;
437                         if (test_vec->plaintext.length < opts->max_buffer_size)
438                                 return -1;
439                         if (test_vec->ciphertext.data == NULL)
440                                 return -1;
441                         if (test_vec->ciphertext.length < opts->max_buffer_size)
442                                 return -1;
443                         if (test_vec->cipher_iv.data == NULL)
444                                 return -1;
445                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
446                                 return -1;
447                         if (test_vec->cipher_key.data == NULL)
448                                 return -1;
449                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
450                                 return -1;
451                 }
452                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
453                         if (test_vec->auth_key.data == NULL)
454                                 return -1;
455                         if (test_vec->auth_key.length != opts->auth_key_sz)
456                                 return -1;
457                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
458                                 return -1;
459                         /* Auth IV is only required for some algorithms */
460                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
461                                 return -1;
462                         if (test_vec->digest.data == NULL)
463                                 return -1;
464                         if (test_vec->digest.length < opts->digest_sz)
465                                 return -1;
466                 }
467         } else if (opts->op_type == CPERF_AEAD) {
468                 if (test_vec->plaintext.data == NULL)
469                         return -1;
470                 if (test_vec->plaintext.length < opts->max_buffer_size)
471                         return -1;
472                 if (test_vec->ciphertext.data == NULL)
473                         return -1;
474                 if (test_vec->ciphertext.length < opts->max_buffer_size)
475                         return -1;
476                 if (test_vec->aead_key.data == NULL)
477                         return -1;
478                 if (test_vec->aead_key.length != opts->aead_key_sz)
479                         return -1;
480                 if (test_vec->aead_iv.data == NULL)
481                         return -1;
482                 if (test_vec->aead_iv.length != opts->aead_iv_sz)
483                         return -1;
484                 if (test_vec->aad.data == NULL)
485                         return -1;
486                 if (test_vec->aad.length != opts->aead_aad_sz)
487                         return -1;
488                 if (test_vec->digest.data == NULL)
489                         return -1;
490                 if (test_vec->digest.length < opts->digest_sz)
491                         return -1;
492         }
493         return 0;
494 }
495
496 int
497 main(int argc, char **argv)
498 {
499         struct cperf_options opts = {0};
500         struct cperf_test_vector *t_vec = NULL;
501         struct cperf_op_fns op_fns;
502         void *ctx[RTE_MAX_LCORE] = { };
503         int nb_cryptodevs = 0;
504         uint16_t total_nb_qps = 0;
505         uint8_t cdev_id, i;
506         uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
507
508         uint8_t buffer_size_idx = 0;
509
510         int ret;
511         uint32_t lcore_id;
512
513         /* Initialise DPDK EAL */
514         ret = rte_eal_init(argc, argv);
515         if (ret < 0)
516                 rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
517         argc -= ret;
518         argv += ret;
519
520         cperf_options_default(&opts);
521
522         ret = cperf_options_parse(&opts, argc, argv);
523         if (ret) {
524                 RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
525                 goto err;
526         }
527
528         ret = cperf_options_check(&opts);
529         if (ret) {
530                 RTE_LOG(ERR, USER1,
531                                 "Checking on or more user options failed\n");
532                 goto err;
533         }
534
535         nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
536
537         if (!opts.silent)
538                 cperf_options_dump(&opts);
539
540         if (nb_cryptodevs < 1) {
541                 RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
542                                 "device type\n");
543                 nb_cryptodevs = 0;
544                 goto err;
545         }
546
547         ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
548                         nb_cryptodevs);
549         if (ret) {
550                 RTE_LOG(ERR, USER1, "Crypto device type does not support "
551                                 "capabilities requested\n");
552                 goto err;
553         }
554
555         if (opts.test_file != NULL) {
556                 t_vec = cperf_test_vector_get_from_file(&opts);
557                 if (t_vec == NULL) {
558                         RTE_LOG(ERR, USER1,
559                                         "Failed to create test vector for"
560                                         " specified file\n");
561                         goto err;
562                 }
563
564                 if (cperf_check_test_vector(&opts, t_vec)) {
565                         RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
566                                         "\n");
567                         goto err;
568                 }
569         } else {
570                 t_vec = cperf_test_vector_get_dummy(&opts);
571                 if (t_vec == NULL) {
572                         RTE_LOG(ERR, USER1,
573                                         "Failed to create test vector for"
574                                         " specified algorithms\n");
575                         goto err;
576                 }
577         }
578
579         ret = cperf_get_op_functions(&opts, &op_fns);
580         if (ret) {
581                 RTE_LOG(ERR, USER1, "Failed to find function ops set for "
582                                 "specified algorithms combination\n");
583                 goto err;
584         }
585
586         if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
587                         opts.test != CPERF_TEST_TYPE_LATENCY)
588                 show_test_vector(t_vec);
589
590         total_nb_qps = nb_cryptodevs * opts.nb_qps;
591
592         i = 0;
593         uint8_t qp_id = 0, cdev_index = 0;
594         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
595
596                 if (i == total_nb_qps)
597                         break;
598
599                 cdev_id = enabled_cdevs[cdev_index];
600
601                 uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
602
603                 ctx[i] = cperf_testmap[opts.test].constructor(
604                                 session_pool_socket[socket_id].sess_mp,
605                                 session_pool_socket[socket_id].priv_mp,
606                                 cdev_id, qp_id,
607                                 &opts, t_vec, &op_fns);
608                 if (ctx[i] == NULL) {
609                         RTE_LOG(ERR, USER1, "Test run constructor failed\n");
610                         goto err;
611                 }
612                 qp_id = (qp_id + 1) % opts.nb_qps;
613                 if (qp_id == 0)
614                         cdev_index++;
615                 i++;
616         }
617
618         if (opts.imix_distribution_count != 0) {
619                 uint8_t buffer_size_count = opts.buffer_size_count;
620                 uint16_t distribution_total[buffer_size_count];
621                 uint32_t op_idx;
622                 uint32_t test_average_size = 0;
623                 const uint32_t *buffer_size_list = opts.buffer_size_list;
624                 const uint32_t *imix_distribution_list = opts.imix_distribution_list;
625
626                 opts.imix_buffer_sizes = rte_malloc(NULL,
627                                         sizeof(uint32_t) * opts.pool_sz,
628                                         0);
629                 /*
630                  * Calculate accumulated distribution of
631                  * probabilities per packet size
632                  */
633                 distribution_total[0] = imix_distribution_list[0];
634                 for (i = 1; i < buffer_size_count; i++)
635                         distribution_total[i] = imix_distribution_list[i] +
636                                 distribution_total[i-1];
637
638                 /* Calculate a random sequence of packet sizes, based on distribution */
639                 for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
640                         uint16_t random_number = rte_rand() %
641                                 distribution_total[buffer_size_count - 1];
642                         for (i = 0; i < buffer_size_count; i++)
643                                 if (random_number < distribution_total[i])
644                                         break;
645
646                         opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
647                 }
648
649                 /* Calculate average buffer size for the IMIX distribution */
650                 for (i = 0; i < buffer_size_count; i++)
651                         test_average_size += buffer_size_list[i] *
652                                 imix_distribution_list[i];
653
654                 opts.test_buffer_size = test_average_size /
655                                 distribution_total[buffer_size_count - 1];
656
657                 i = 0;
658                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
659
660                         if (i == total_nb_qps)
661                                 break;
662
663                         rte_eal_remote_launch(cperf_testmap[opts.test].runner,
664                                 ctx[i], lcore_id);
665                         i++;
666                 }
667                 i = 0;
668                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
669
670                         if (i == total_nb_qps)
671                                 break;
672                         ret |= rte_eal_wait_lcore(lcore_id);
673                         i++;
674                 }
675
676                 if (ret != EXIT_SUCCESS)
677                         goto err;
678         } else {
679
680                 /* Get next size from range or list */
681                 if (opts.inc_buffer_size != 0)
682                         opts.test_buffer_size = opts.min_buffer_size;
683                 else
684                         opts.test_buffer_size = opts.buffer_size_list[0];
685
686                 while (opts.test_buffer_size <= opts.max_buffer_size) {
687                         i = 0;
688                         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
689
690                                 if (i == total_nb_qps)
691                                         break;
692
693                                 rte_eal_remote_launch(cperf_testmap[opts.test].runner,
694                                         ctx[i], lcore_id);
695                                 i++;
696                         }
697                         i = 0;
698                         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
699
700                                 if (i == total_nb_qps)
701                                         break;
702                                 ret |= rte_eal_wait_lcore(lcore_id);
703                                 i++;
704                         }
705
706                         if (ret != EXIT_SUCCESS)
707                                 goto err;
708
709                         /* Get next size from range or list */
710                         if (opts.inc_buffer_size != 0)
711                                 opts.test_buffer_size += opts.inc_buffer_size;
712                         else {
713                                 if (++buffer_size_idx == opts.buffer_size_count)
714                                         break;
715                                 opts.test_buffer_size =
716                                         opts.buffer_size_list[buffer_size_idx];
717                         }
718                 }
719         }
720
721         i = 0;
722         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
723
724                 if (i == total_nb_qps)
725                         break;
726
727                 cperf_testmap[opts.test].destructor(ctx[i]);
728                 i++;
729         }
730
731         for (i = 0; i < nb_cryptodevs &&
732                         i < RTE_CRYPTO_MAX_DEVS; i++)
733                 rte_cryptodev_stop(enabled_cdevs[i]);
734
735         free_test_vector(t_vec, &opts);
736
737         printf("\n");
738         return EXIT_SUCCESS;
739
740 err:
741         i = 0;
742         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
743                 if (i == total_nb_qps)
744                         break;
745
746                 if (ctx[i] && cperf_testmap[opts.test].destructor)
747                         cperf_testmap[opts.test].destructor(ctx[i]);
748                 i++;
749         }
750
751         for (i = 0; i < nb_cryptodevs &&
752                         i < RTE_CRYPTO_MAX_DEVS; i++)
753                 rte_cryptodev_stop(enabled_cdevs[i]);
754         rte_free(opts.imix_buffer_sizes);
755         free_test_vector(t_vec, &opts);
756
757         printf("\n");
758         return EXIT_FAILURE;
759 }