/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"
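
/*
 * Typical invocation (a sketch assuming the documented
 * dpdk-test-crypto-perf options; adjust the lcore set and devtype to the
 * platform under test):
 *
 *   ./dpdk-test-crypto-perf -l 0-3 -- --ptest throughput \
 *       --devtype crypto_aesni_mb --optype cipher-only \
 *       --cipher-algo aes-cbc --cipher-key-sz 16 --buffer-sz 64,256,1024
 */

/*
 * One pair of session mempools per NUMA socket, so session objects are
 * allocated on the same socket as the devices that use them.
 */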
static struct {
        struct rte_mempool *sess_mp;
        struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
        [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
        [CPERF_TEST_TYPE_LATENCY] = "latency",
        [CPERF_TEST_TYPE_VERIFY] = "verify",
        [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
        [CPERF_CIPHER_ONLY] = "cipher-only",
        [CPERF_AUTH_ONLY] = "auth-only",
        [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
        [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
        [CPERF_AEAD] = "aead",
        [CPERF_PDCP] = "pdcp",
        [CPERF_DOCSIS] = "docsis",
        [CPERF_IPSEC] = "ipsec",
        [CPERF_ASYM_MODEX] = "modex"
};

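/*
 * Each test type is driven through the same three-step interface:
 * main() calls the constructor once per (device, queue pair), launches
 * the runner on a worker lcore, and finally invokes the destructor.
 */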
const struct cperf_test cperf_testmap[] = {
                [CPERF_TEST_TYPE_THROUGHPUT] = {
                                cperf_throughput_test_constructor,
                                cperf_throughput_test_runner,
                                cperf_throughput_test_destructor
                },
                [CPERF_TEST_TYPE_LATENCY] = {
                                cperf_latency_test_constructor,
                                cperf_latency_test_runner,
                                cperf_latency_test_destructor
                },
                [CPERF_TEST_TYPE_VERIFY] = {
                                cperf_verify_test_constructor,
                                cperf_verify_test_runner,
                                cperf_verify_test_destructor
                },
                [CPERF_TEST_TYPE_PMDCC] = {
                                cperf_pmd_cyclecount_test_constructor,
                                cperf_pmd_cyclecount_test_runner,
                                cperf_pmd_cyclecount_test_destructor
                }
};

static int
create_asym_op_pool_socket(uint8_t dev_id, int32_t socket_id,
                           uint32_t nb_sessions)
{
        char mp_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *mpool = NULL;
        unsigned int session_size =
                RTE_MAX(rte_cryptodev_asym_get_private_session_size(dev_id),
                        rte_cryptodev_asym_get_header_session_size());

        if (session_pool_socket[socket_id].priv_mp == NULL) {
                snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_priv_pool%u",
                         socket_id);

                mpool = rte_mempool_create(mp_name, nb_sessions, session_size,
                                           0, 0, NULL, NULL, NULL, NULL,
                                           socket_id, 0);
                if (mpool == NULL) {
                        printf("Cannot create pool \"%s\" on socket %d\n",
                               mp_name, socket_id);
                        return -ENOMEM;
                }
                printf("Allocated pool \"%s\" on socket %d\n", mp_name,
                       socket_id);
                session_pool_socket[socket_id].priv_mp = mpool;
        }

        if (session_pool_socket[socket_id].sess_mp == NULL) {

                snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_sess_pool%u",
                         socket_id);
                mpool = rte_mempool_create(mp_name, nb_sessions,
                                           session_size, 0, 0, NULL, NULL, NULL,
                                           NULL, socket_id, 0);
                if (mpool == NULL) {
                        printf("Cannot create pool \"%s\" on socket %d\n",
                               mp_name, socket_id);
                        return -ENOMEM;
                }
                session_pool_socket[socket_id].sess_mp = mpool;
        }
        return 0;
}

static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
                uint32_t nb_sessions)
{
        char mp_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *sess_mp;

        if (session_pool_socket[socket_id].priv_mp == NULL) {
                snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                        "priv_sess_mp_%u", socket_id);

                sess_mp = rte_mempool_create(mp_name,
                                        nb_sessions,
                                        session_priv_size,
                                        0, 0, NULL, NULL, NULL,
                                        NULL, socket_id,
                                        0);

                if (sess_mp == NULL) {
                        printf("Cannot create pool \"%s\" on socket %d\n",
                                mp_name, socket_id);
                        return -ENOMEM;
                }

                printf("Allocated pool \"%s\" on socket %d\n",
                        mp_name, socket_id);
                session_pool_socket[socket_id].priv_mp = sess_mp;
        }

        if (session_pool_socket[socket_id].sess_mp == NULL) {

                snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                        "sess_mp_%u", socket_id);

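                /*
                 * The header pool is created through the cryptodev API
                 * rather than rte_mempool_create(): it sizes the elements
                 * for the session header and records pool metadata that
                 * the library checks when sessions are created.
                 */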
                sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
                                        nb_sessions, 0, 0, 0, socket_id);

                if (sess_mp == NULL) {
                        printf("Cannot create pool \"%s\" on socket %d\n",
                                mp_name, socket_id);
                        return -ENOMEM;
                }

                printf("Allocated pool \"%s\" on socket %d\n",
                        mp_name, socket_id);
                session_pool_socket[socket_id].sess_mp = sess_mp;
        }

        return 0;
}

static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
        uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
        uint32_t sessions_needed = 0;
        unsigned int i, j;
        int ret;

        enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
                        enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
        if (enabled_cdev_count == 0) {
                printf("No crypto devices of type %s available\n",
                                opts->device_type);
                return -EINVAL;
        }

        nb_lcores = rte_lcore_count() - 1;

        if (nb_lcores < 1) {
                RTE_LOG(ERR, USER1,
                        "Number of enabled cores needs to be higher than 1\n");
                return -EINVAL;
        }

        /*
         * Use fewer devices if there are more of them
         * available than cores.
         */
        if (enabled_cdev_count > nb_lcores)
                enabled_cdev_count = nb_lcores;

        /*
         * Create session mempools shared by all the devices: size the
         * elements for the largest private session size reported by any
         * PMD.
         */
        uint32_t max_sess_size = 0, sess_size;

        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
                if (sess_size > max_sess_size)
                        max_sess_size = sess_size;
        }
#ifdef RTE_LIB_SECURITY
        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                sess_size = rte_security_session_get_size(
                                rte_cryptodev_get_sec_ctx(cdev_id));
                if (sess_size > max_sess_size)
                        max_sess_size = sess_size;
        }
#endif
        /*
         * Calculate the number of queue pairs needed per device, based on
         * the number of available logical cores and crypto devices. For
         * instance, if there are 4 cores and 2 crypto devices, 2 queue
         * pairs will be set up per device.
         */
        opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
                                (nb_lcores / enabled_cdev_count) + 1 :
                                nb_lcores / enabled_cdev_count;
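        /*
         * Note this is ceiling division: with 5 worker cores and 2
         * devices it yields 3 queue pairs per device, so every core gets
         * a queue pair.
         */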

        for (i = 0; i < enabled_cdev_count &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
                /*
                 * If multi-core scheduler is used, limit the number
                 * of queue pairs to 1, as there is no way to know
                 * how many cores are being used by the PMD, and
                 * how many will be available for the application.
                 */
                if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
                                rte_cryptodev_scheduler_mode_get(cdev_id) ==
                                CDEV_SCHED_MODE_MULTICORE)
                        opts->nb_qps = 1;
#endif

                struct rte_cryptodev_info cdev_info;
                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
                /* Range-check the socket_id: a negative return value
                 * becomes a large positive one due to the unsigned type.
                 */
                if (socket_id >= RTE_MAX_NUMA_NODES)
                        socket_id = 0;

                rte_cryptodev_info_get(cdev_id, &cdev_info);

                if (opts->op_type == CPERF_ASYM_MODEX) {
                        if ((cdev_info.feature_flags &
                             RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
                                continue;
                }

                if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
                        printf("Number of needed queue pairs is higher "
                                "than the maximum number of queue pairs "
                                "per device.\n");
                        printf("Lower the number of cores or increase "
                                "the number of crypto devices\n");
                        return -EINVAL;
                }
                struct rte_cryptodev_config conf = {
                        .nb_queue_pairs = opts->nb_qps,
                        .socket_id = socket_id,
                };

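                /*
                 * ff_disable tells the PMD which feature sets this run
                 * will not use, so the driver may skip allocating
                 * resources for them; e.g. a symmetric-only test disables
                 * security and asymmetric support.
                 */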
                switch (opts->op_type) {
                case CPERF_ASYM_MODEX:
                        conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
                                            RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
                        break;
                case CPERF_CIPHER_ONLY:
                case CPERF_AUTH_ONLY:
                case CPERF_CIPHER_THEN_AUTH:
                case CPERF_AUTH_THEN_CIPHER:
                case CPERF_AEAD:
                        conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
                        /* Fall through */
                case CPERF_PDCP:
                case CPERF_DOCSIS:
                case CPERF_IPSEC:
                        /* Fall through */
                default:
                        conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
                }

                struct rte_cryptodev_qp_conf qp_conf = {
                        .nb_descriptors = opts->nb_descriptors
                };

                /*
                 * Device info specifies the minimum headroom and tailroom
                 * requirements of the crypto PMD. These need to be
                 * honoured by the application when creating mbufs.
                 */
                if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
                        /* Update headroom */
                        opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
                }
                if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
                        /* Update tailroom */
                        opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
                }

                /* Update segment size to include headroom & tailroom */
                opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

                uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
                /*
                 * Two session objects are required for each session
                 * (one for the header, one for the private data)
                 */
                if (!strcmp((const char *)opts->device_type,
                                        "crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
                        uint32_t nb_slaves =
                                rte_cryptodev_scheduler_workers_get(cdev_id,
                                                                NULL);

                        sessions_needed = enabled_cdev_count *
                                opts->nb_qps * nb_slaves;
#endif
                } else
                        sessions_needed = enabled_cdev_count * opts->nb_qps;

                /*
                 * A single session is required per queue pair
                 * in each device
                 */
                if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
                        RTE_LOG(ERR, USER1,
                                "Device does not support at least "
                                "%u sessions\n", opts->nb_qps);
                        return -ENOTSUP;
                }

                if (opts->op_type == CPERF_ASYM_MODEX)
                        ret = create_asym_op_pool_socket(cdev_id, socket_id,
                                                         sessions_needed);
                else
                        ret = fill_session_pool_socket(socket_id, max_sess_size,
                                                       sessions_needed);
                if (ret < 0)
                        return ret;

                qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
                qp_conf.mp_session_private =
                                session_pool_socket[socket_id].priv_mp;

                if (opts->op_type == CPERF_ASYM_MODEX) {
                        qp_conf.mp_session = NULL;
                        qp_conf.mp_session_private = NULL;
                }

                ret = rte_cryptodev_configure(cdev_id, &conf);
                if (ret < 0) {
                        printf("Failed to configure cryptodev %u\n", cdev_id);
                        return -EINVAL;
                }

                for (j = 0; j < opts->nb_qps; j++) {
                        ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
                                &qp_conf, socket_id);
                        if (ret < 0) {
                                printf("Failed to setup queue pair %u on "
                                        "cryptodev %u\n", j, cdev_id);
                                return -EINVAL;
                        }
                }

                ret = rte_cryptodev_start(cdev_id);
                if (ret < 0) {
                        printf("Failed to start device %u: error %d\n",
                                        cdev_id, ret);
                        return -EPERM;
                }
        }

        return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
                uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
        struct rte_cryptodev_sym_capability_idx cap_idx;
        const struct rte_cryptodev_symmetric_capability *capability;
        struct rte_cryptodev_asym_capability_idx asym_cap_idx;
        const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;

        uint8_t i, cdev_id;
        int ret;

        for (i = 0; i < nb_cryptodevs; i++) {
                cdev_id = enabled_cdevs[i];

                if (opts->op_type == CPERF_ASYM_MODEX) {
                        asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
                        asym_capability = rte_cryptodev_asym_capability_get(
                                cdev_id, &asym_cap_idx);
                        if (asym_capability == NULL)
                                return -1;

                        ret = rte_cryptodev_asym_xform_capability_check_modlen(
                                asym_capability, sizeof(perf_mod_p));
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_AUTH_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                        cap_idx.algo.auth = opts->auth_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_auth(
                                        capability,
                                        opts->auth_key_sz,
                                        opts->digest_sz,
                                        opts->auth_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_CIPHER_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        cap_idx.algo.cipher = opts->cipher_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_cipher(
                                        capability,
                                        opts->cipher_key_sz,
                                        opts->cipher_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_AEAD) {
                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
                        cap_idx.algo.aead = opts->aead_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_aead(
                                        capability,
                                        opts->aead_key_sz,
                                        opts->digest_sz,
                                        opts->aead_aad_sz,
                                        opts->aead_iv_sz);
                        if (ret != 0)
                                return ret;
                }
        }

        return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
                struct cperf_test_vector *test_vec)
{
        if (opts->op_type == CPERF_CIPHER_ONLY) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        /* Cipher IV is only required for some algorithms */
                        if (opts->cipher_iv_sz &&
                                        test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AUTH_ONLY) {
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        /* Auth key is only required for some algorithms */
                        if (opts->auth_key_sz &&
                                        test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                        opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AEAD) {
                if (test_vec->plaintext.data == NULL)
                        return -1;
                if (test_vec->plaintext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->ciphertext.data == NULL)
                        return -1;
                if (test_vec->ciphertext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->aead_key.data == NULL)
                        return -1;
                if (test_vec->aead_key.length != opts->aead_key_sz)
                        return -1;
                if (test_vec->aead_iv.data == NULL)
                        return -1;
                if (test_vec->aead_iv.length != opts->aead_iv_sz)
                        return -1;
                if (test_vec->aad.data == NULL)
                        return -1;
                if (test_vec->aad.length != opts->aead_aad_sz)
                        return -1;
                if (test_vec->digest.data == NULL)
                        return -1;
                if (test_vec->digest.length < opts->digest_sz)
                        return -1;
        }
        return 0;
}

int
main(int argc, char **argv)
{
        struct cperf_options opts = {0};
        struct cperf_test_vector *t_vec = NULL;
        struct cperf_op_fns op_fns;
        void *ctx[RTE_MAX_LCORE] = { };
        int nb_cryptodevs = 0;
        uint16_t total_nb_qps = 0;
        uint8_t cdev_id, i;
        uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

        uint8_t buffer_size_idx = 0;

        int ret;
        uint32_t lcore_id;

        /* Initialise the DPDK EAL; it returns the number of arguments it
         * consumed, so advance argc/argv past them to reach the
         * application options.
         */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
        argc -= ret;
        argv += ret;

        cperf_options_default(&opts);

        ret = cperf_options_parse(&opts, argc, argv);
        if (ret) {
                RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
                goto err;
        }

        ret = cperf_options_check(&opts);
        if (ret) {
                RTE_LOG(ERR, USER1,
                                "Checking one or more user options failed\n");
                goto err;
        }

        nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

        if (!opts.silent)
                cperf_options_dump(&opts);

        if (nb_cryptodevs < 1) {
                RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
                                "device type\n");
                nb_cryptodevs = 0;
                goto err;
        }

        ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
                        nb_cryptodevs);
        if (ret) {
                RTE_LOG(ERR, USER1, "Crypto device type does not support "
                                "the requested capabilities\n");
                goto err;
        }

        if (opts.test_file != NULL) {
                t_vec = cperf_test_vector_get_from_file(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified file\n");
                        goto err;
                }

                if (cperf_check_test_vector(&opts, t_vec)) {
                        RTE_LOG(ERR, USER1,
                                        "Test vector is missing required fields\n");
                        goto err;
                }
        } else {
                t_vec = cperf_test_vector_get_dummy(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified algorithms\n");
                        goto err;
                }
        }

        ret = cperf_get_op_functions(&opts, &op_fns);
        if (ret) {
                RTE_LOG(ERR, USER1, "Failed to find function ops set for "
                                "specified algorithms combination\n");
                goto err;
        }

        if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
                        opts.test != CPERF_TEST_TYPE_LATENCY)
                show_test_vector(t_vec);

        total_nb_qps = nb_cryptodevs * opts.nb_qps;

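        /*
         * Assign one (device, queue pair) tuple to each worker lcore. For
         * example, with 2 devices and 2 queue pairs each, the first four
         * workers map to (dev0, qp0), (dev0, qp1), (dev1, qp0), (dev1, qp1).
         */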
        i = 0;
        uint8_t qp_id = 0, cdev_index = 0;
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (i == total_nb_qps)
                        break;

                cdev_id = enabled_cdevs[cdev_index];

                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                ctx[i] = cperf_testmap[opts.test].constructor(
                                session_pool_socket[socket_id].sess_mp,
                                session_pool_socket[socket_id].priv_mp,
                                cdev_id, qp_id,
                                &opts, t_vec, &op_fns);
                if (ctx[i] == NULL) {
                        RTE_LOG(ERR, USER1, "Test run constructor failed\n");
                        goto err;
                }
                qp_id = (qp_id + 1) % opts.nb_qps;
                if (qp_id == 0)
                        cdev_index++;
                i++;
        }

        if (opts.imix_distribution_count != 0) {
                uint8_t buffer_size_count = opts.buffer_size_count;
                uint16_t distribution_total[buffer_size_count];
                uint32_t op_idx;
                uint32_t test_average_size = 0;
                const uint32_t *buffer_size_list = opts.buffer_size_list;
                const uint32_t *imix_distribution_list = opts.imix_distribution_list;

                opts.imix_buffer_sizes = rte_malloc(NULL,
                                        sizeof(uint32_t) * opts.pool_sz,
                                        0);
                if (opts.imix_buffer_sizes == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to allocate IMIX buffer size list\n");
                        goto err;
                }
                /*
                 * Calculate the cumulative distribution of
                 * probabilities per packet size
                 */
                distribution_total[0] = imix_distribution_list[0];
                for (i = 1; i < buffer_size_count; i++)
                        distribution_total[i] = imix_distribution_list[i] +
                                distribution_total[i-1];
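                /*
                 * Example: weights {6, 3, 1} accumulate to {6, 9, 10}; a
                 * random draw in [0, 10) then selects the first bucket
                 * whose cumulative weight exceeds it (draws 0-5, 6-8 and
                 * 9, respectively).
                 */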

                /* Calculate a random sequence of packet sizes, based on distribution */
                for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
                        uint16_t random_number = rte_rand() %
                                distribution_total[buffer_size_count - 1];
                        for (i = 0; i < buffer_size_count; i++)
                                if (random_number < distribution_total[i])
                                        break;

                        opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
                }

                /* Calculate average buffer size for the IMIX distribution */
                for (i = 0; i < buffer_size_count; i++)
                        test_average_size += buffer_size_list[i] *
                                imix_distribution_list[i];

                opts.test_buffer_size = test_average_size /
                                distribution_total[buffer_size_count - 1];
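                /*
                 * With buffer sizes {64, 512, 1500} and the weights above,
                 * this gives (64*6 + 512*3 + 1500*1) / 10 = 342 bytes on
                 * average.
                 */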

                i = 0;
                RTE_LCORE_FOREACH_WORKER(lcore_id) {
                        if (i == total_nb_qps)
                                break;

                        rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                ctx[i], lcore_id);
                        i++;
                }
                i = 0;
                RTE_LCORE_FOREACH_WORKER(lcore_id) {
                        if (i == total_nb_qps)
                                break;
                        ret |= rte_eal_wait_lcore(lcore_id);
                        i++;
                }

                if (ret != EXIT_SUCCESS)
                        goto err;
        } else {
                /* Get the first size from the range or list */
                if (opts.inc_buffer_size != 0)
                        opts.test_buffer_size = opts.min_buffer_size;
                else
                        opts.test_buffer_size = opts.buffer_size_list[0];

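                /*
                 * Buffer sizes come either from a range (starting at
                 * min_buffer_size, stepped by inc_buffer_size up to
                 * max_buffer_size; e.g. min 64, inc 64, max 256 runs 64,
                 * 128, 192 and 256 bytes) or from an explicit list walked
                 * in order.
                 */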
                while (opts.test_buffer_size <= opts.max_buffer_size) {
                        i = 0;
                        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                                if (i == total_nb_qps)
                                        break;

                                rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                        ctx[i], lcore_id);
                                i++;
                        }
                        i = 0;
                        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                                if (i == total_nb_qps)
                                        break;
                                ret |= rte_eal_wait_lcore(lcore_id);
                                i++;
                        }

                        if (ret != EXIT_SUCCESS)
                                goto err;

                        /* Get next size from range or list */
                        if (opts.inc_buffer_size != 0)
                                opts.test_buffer_size += opts.inc_buffer_size;
                        else {
                                if (++buffer_size_idx == opts.buffer_size_count)
                                        break;
                                opts.test_buffer_size =
                                        opts.buffer_size_list[buffer_size_idx];
                        }
                }
        }

        i = 0;
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (i == total_nb_qps)
                        break;

                cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                rte_cryptodev_stop(enabled_cdevs[i]);
                ret = rte_cryptodev_close(enabled_cdevs[i]);
                if (ret)
                        RTE_LOG(ERR, USER1,
                                        "Crypto device close error %d\n", ret);
        }

        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_SUCCESS;

err:
        i = 0;
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (i == total_nb_qps)
                        break;

                if (ctx[i] && cperf_testmap[opts.test].destructor)
                        cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                rte_cryptodev_stop(enabled_cdevs[i]);
                ret = rte_cryptodev_close(enabled_cdevs[i]);
                if (ret)
                        RTE_LOG(ERR, USER1,
                                        "Crypto device close error %d\n", ret);
        }
        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_FAILURE;
}