/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

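/* Per-socket session pools, shared by all crypto devices on the same NUMA node */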
static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis",
	[CPERF_IPSEC] = "ipsec",
	[CPERF_ASYM_MODEX] = "modex"
};

const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};

static int
create_asym_op_pool_socket(int32_t socket_id, uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mpool = NULL;

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_sess_pool%d",
			 socket_id);
		mpool = rte_cryptodev_asym_session_pool_create(mp_name,
				nb_sessions, 0, 0, socket_id);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
			       mp_name, socket_id);
			return -ENOMEM;
		}
		session_pool_socket[socket_id].sess_mp = mpool;
	}
	return 0;
}

static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%d", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%d", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}

static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

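	/* The main lcore only coordinates the run, so exclude it from the worker count */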
	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if more of them are
	 * available than there are worker cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#ifdef RTE_LIB_SECURITY
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_security_session_get_size(
				rte_cryptodev_get_sec_ctx(cdev_id));
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#endif
	/*
	 * Calculate the number of queue pairs needed, based on the
	 * number of available logical cores and crypto devices.
	 * For instance, with 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (opts->op_type == CPERF_ASYM_MODEX) {
			if ((cdev_info.feature_flags &
			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
				continue;
		}

		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
		};

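		/* Disable the crypto operation families this test type will not exercise */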
		switch (opts->op_type) {
		case CPERF_ASYM_MODEX:
			conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
					    RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
			break;
		case CPERF_CIPHER_ONLY:
		case CPERF_AUTH_ONLY:
		case CPERF_CIPHER_THEN_AUTH:
		case CPERF_AUTH_THEN_CIPHER:
		case CPERF_AEAD:
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
			/* Fall through */
		case CPERF_PDCP:
		case CPERF_DOCSIS:
		case CPERF_IPSEC:
			/* Fall through */
		default:
			conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
		}

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the minimum headroom and tailroom
		 * required by the crypto PMD. This needs to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
			uint32_t nb_workers =
				rte_cryptodev_scheduler_workers_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_workers;
#endif
		} else
			sessions_needed = enabled_cdev_count * opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		if (opts->op_type == CPERF_ASYM_MODEX)
			ret = create_asym_op_pool_socket(socket_id,
							 sessions_needed);
		else
			ret = fill_session_pool_socket(socket_id, max_sess_size,
						       sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

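		/* Asymmetric sessions are allocated directly from the asym pool,
		 * so no session pools are attached to the queue pairs.
		 */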
		if (opts->op_type == CPERF_ASYM_MODEX) {
			qp_conf.mp_session = NULL;
			qp_conf.mp_session_private = NULL;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;
	struct rte_cryptodev_asym_capability_idx asym_cap_idx;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_ASYM_MODEX) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
			asym_capability = rte_cryptodev_asym_capability_get(
				cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			ret = rte_cryptodev_asym_xform_capability_check_modlen(
				asym_capability, opts->modex_data->modulus.len);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

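/* Validate that a user-supplied test vector provides every field the selected op type requires */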
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Test vector is missing required"
					" fields\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"the specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

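	/*
	 * Create one test context per queue pair and bind each to a worker
	 * lcore: queue pairs are assigned in order, moving to the next
	 * device once all queue pairs of the current one are taken.
	 */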
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

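	/* IMIX mode: pre-compute a randomised buffer size for every op in the pool */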
	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to allocate IMIX buffer sizes\n");
			goto err;
		}
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get first size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

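	/* Tear down the per-queue-pair test contexts */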
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

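	/* Stop and close every crypto device that was enabled */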
	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
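	/* Release any contexts and devices that were set up before the failure */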
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}