fix typos
app/test-crypto-perf/main.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

#define NUM_SESSIONS 2048
#define SESS_MEMPOOL_CACHE_SIZE 64

const char *cperf_test_type_strs[] = {
        [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
        [CPERF_TEST_TYPE_LATENCY] = "latency",
        [CPERF_TEST_TYPE_VERIFY] = "verify",
        [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
        [CPERF_CIPHER_ONLY] = "cipher-only",
        [CPERF_AUTH_ONLY] = "auth-only",
        [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
        [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
        [CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
        [CPERF_TEST_TYPE_THROUGHPUT] = {
                cperf_throughput_test_constructor,
                cperf_throughput_test_runner,
                cperf_throughput_test_destructor
        },
        [CPERF_TEST_TYPE_LATENCY] = {
                cperf_latency_test_constructor,
                cperf_latency_test_runner,
                cperf_latency_test_destructor
        },
        [CPERF_TEST_TYPE_VERIFY] = {
                cperf_verify_test_constructor,
                cperf_verify_test_runner,
                cperf_verify_test_destructor
        },
        [CPERF_TEST_TYPE_PMDCC] = {
                cperf_pmd_cyclecount_test_constructor,
                cperf_pmd_cyclecount_test_runner,
                cperf_pmd_cyclecount_test_destructor
        }
};

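/*
 * Configure and start every enabled crypto device of the requested type,
 * creating one session mempool per NUMA socket, sized for the largest
 * per-device private session. Returns the number of enabled devices on
 * success or a negative errno value on failure.
 */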
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
                        struct rte_mempool *session_pool_socket[])
{
        uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
        unsigned int i, j;
        int ret;

        enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
                        enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
        if (enabled_cdev_count == 0) {
                printf("No crypto devices of type %s available\n",
                                opts->device_type);
                return -EINVAL;
        }

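        /* One lcore is the master core; only slave lcores drive queue pairs. */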
        nb_lcores = rte_lcore_count() - 1;

        if (enabled_cdev_count > nb_lcores) {
                printf("Number of capable crypto devices (%d) "
                                "has to be less than or equal to the number "
                                "of slave cores (%d)\n",
                                enabled_cdev_count, nb_lcores);
                return -EINVAL;
        }

        /* Create a mempool shared by all the devices */
        uint32_t max_sess_size = 0, sess_size;

        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                sess_size = rte_cryptodev_get_private_session_size(cdev_id);
                if (sess_size > max_sess_size)
                        max_sess_size = sess_size;
        }

        /*
         * Calculate the number of needed queue pairs, based on the number
         * of available logical cores and crypto devices. For instance,
         * if there are 4 cores and 2 crypto devices, 2 queue pairs will be
         * set up per device.
         */
        opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
                                (nb_lcores / enabled_cdev_count) + 1 :
                                nb_lcores / enabled_cdev_count;

        for (i = 0; i < enabled_cdev_count &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                /*
                 * If the multi-core scheduler is used, limit the number
                 * of queue pairs to 1, as there is no way to know
                 * how many cores are being used by the PMD, and
                 * how many will be available for the application.
                 */
                if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
                                rte_cryptodev_scheduler_mode_get(cdev_id) ==
                                CDEV_SCHED_MODE_MULTICORE)
                        opts->nb_qps = 1;
#endif

                struct rte_cryptodev_info cdev_info;
                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                rte_cryptodev_info_get(cdev_id, &cdev_info);
                if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
                        printf("Number of needed queue pairs is higher "
                                "than the maximum number of queue pairs "
                                "per device.\n");
                        printf("Lower the number of cores or increase "
                                "the number of crypto devices\n");
                        return -EINVAL;
                }
                struct rte_cryptodev_config conf = {
                        .nb_queue_pairs = opts->nb_qps,
                        .socket_id = socket_id
                };

                struct rte_cryptodev_qp_conf qp_conf = {
                        .nb_descriptors = opts->nb_descriptors
                };

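                /*
                 * Create the session mempool for this socket on first use;
                 * it is shared by all devices on the same socket.
                 */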
                if (session_pool_socket[socket_id] == NULL) {
                        char mp_name[RTE_MEMPOOL_NAMESIZE];
                        struct rte_mempool *sess_mp;

                        snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                                "sess_mp_%u", socket_id);

                        sess_mp = rte_mempool_create(mp_name,
                                                NUM_SESSIONS,
                                                max_sess_size,
                                                SESS_MEMPOOL_CACHE_SIZE,
                                                0, NULL, NULL, NULL,
                                                NULL, socket_id,
                                                0);

                        if (sess_mp == NULL) {
                                printf("Cannot create session pool on socket %d\n",
                                        socket_id);
                                return -ENOMEM;
                        }

                        printf("Allocated session pool on socket %d\n", socket_id);
                        session_pool_socket[socket_id] = sess_mp;
                }

                ret = rte_cryptodev_configure(cdev_id, &conf);
                if (ret < 0) {
                        printf("Failed to configure cryptodev %u", cdev_id);
                        return -EINVAL;
                }

                for (j = 0; j < opts->nb_qps; j++) {
                        ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
                                &qp_conf, socket_id,
                                session_pool_socket[socket_id]);
                        if (ret < 0) {
                                printf("Failed to set up queue pair %u on "
                                        "cryptodev %u", j, cdev_id);
                                return -EINVAL;
                        }
                }

                ret = rte_cryptodev_start(cdev_id);
                if (ret < 0) {
                        printf("Failed to start device %u: error %d\n",
                                        cdev_id, ret);
                        return -EPERM;
                }
        }

        return enabled_cdev_count;
}

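/*
 * Check that every enabled device supports the requested cipher, auth
 * and/or AEAD algorithm with the configured key, IV, digest and AAD sizes.
 * Returns 0 when all devices are capable, a non-zero value otherwise.
 */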
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
                uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
        struct rte_cryptodev_sym_capability_idx cap_idx;
        const struct rte_cryptodev_symmetric_capability *capability;

        uint8_t i, cdev_id;
        int ret;

        for (i = 0; i < nb_cryptodevs; i++) {

                cdev_id = enabled_cdevs[i];

                if (opts->op_type == CPERF_AUTH_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                        cap_idx.algo.auth = opts->auth_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_auth(
                                        capability,
                                        opts->auth_key_sz,
                                        opts->digest_sz,
                                        opts->auth_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_CIPHER_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        cap_idx.algo.cipher = opts->cipher_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_cipher(
                                        capability,
                                        opts->cipher_key_sz,
                                        opts->cipher_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_AEAD) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
                        cap_idx.algo.aead = opts->aead_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_aead(
                                        capability,
                                        opts->aead_key_sz,
                                        opts->digest_sz,
                                        opts->aead_aad_sz,
                                        opts->aead_iv_sz);
                        if (ret != 0)
                                return ret;
                }
        }

        return 0;
}

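/*
 * Validate that a test vector parsed from file provides every field
 * (plaintext, ciphertext, keys, IVs, AAD, digest) required by the selected
 * operation type, with lengths matching the configured sizes.
 */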
static int
cperf_check_test_vector(struct cperf_options *opts,
                struct cperf_test_vector *test_vec)
{
        if (opts->op_type == CPERF_CIPHER_ONLY) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AUTH_ONLY) {
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }

        } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                        opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AEAD) {
                if (test_vec->plaintext.data == NULL)
                        return -1;
                if (test_vec->plaintext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->ciphertext.data == NULL)
                        return -1;
                if (test_vec->ciphertext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->aead_iv.data == NULL)
                        return -1;
                if (test_vec->aead_iv.length != opts->aead_iv_sz)
                        return -1;
                if (test_vec->aad.data == NULL)
                        return -1;
                if (test_vec->aad.length != opts->aead_aad_sz)
                        return -1;
                if (test_vec->digest.data == NULL)
                        return -1;
                if (test_vec->digest.length < opts->digest_sz)
                        return -1;
        }
        return 0;
}

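/*
 * Main flow: parse and check the options, initialize the crypto devices,
 * verify their capabilities, load or generate a test vector, then run one
 * test instance per (device, queue pair) on the slave lcores for every
 * requested buffer size.
 */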
int
main(int argc, char **argv)
{
        struct cperf_options opts = {0};
        struct cperf_test_vector *t_vec = NULL;
        struct cperf_op_fns op_fns;

        void *ctx[RTE_MAX_LCORE] = { };
        struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

        int nb_cryptodevs = 0;
        uint16_t total_nb_qps = 0;
        uint8_t cdev_id, i;
        uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

        uint8_t buffer_size_idx = 0;

        int ret;
        uint32_t lcore_id;

        /* Initialise DPDK EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
        argc -= ret;
        argv += ret;

        cperf_options_default(&opts);

        ret = cperf_options_parse(&opts, argc, argv);
        if (ret) {
                RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
                goto err;
        }

        ret = cperf_options_check(&opts);
        if (ret) {
                RTE_LOG(ERR, USER1,
                                "Checking one or more user options failed\n");
                goto err;
        }

        nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
                        session_pool_socket);

        if (!opts.silent)
                cperf_options_dump(&opts);

        if (nb_cryptodevs < 1) {
                RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
                                "device type\n");
                nb_cryptodevs = 0;
                goto err;
        }

        ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
                        nb_cryptodevs);
        if (ret) {
                RTE_LOG(ERR, USER1, "Crypto device type does not support "
                                "capabilities requested\n");
                goto err;
        }

        if (opts.test_file != NULL) {
                t_vec = cperf_test_vector_get_from_file(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified file\n");
                        goto err;
                }

                if (cperf_check_test_vector(&opts, t_vec)) {
                        RTE_LOG(ERR, USER1, "Necessary test vectors are "
                                        "incomplete\n");
                        goto err;
                }
        } else {
                t_vec = cperf_test_vector_get_dummy(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified algorithms\n");
                        goto err;
                }
        }

        ret = cperf_get_op_functions(&opts, &op_fns);
        if (ret) {
                RTE_LOG(ERR, USER1, "Failed to find function ops set for "
                                "specified algorithms combination\n");
                goto err;
        }

        if (!opts.silent)
                show_test_vector(t_vec);

        total_nb_qps = nb_cryptodevs * opts.nb_qps;

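        /*
         * Assign one (crypto device, queue pair) pair to each slave lcore
         * and build a per-lcore test context. The queue pairs of a device
         * are exhausted before moving on to the next device.
         */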
        i = 0;
        uint8_t qp_id = 0, cdev_index = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cdev_id = enabled_cdevs[cdev_index];

                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                ctx[i] = cperf_testmap[opts.test].constructor(
                                session_pool_socket[socket_id], cdev_id, qp_id,
                                &opts, t_vec, &op_fns);
                if (ctx[i] == NULL) {
                        RTE_LOG(ERR, USER1, "Test run constructor failed\n");
                        goto err;
                }
                qp_id = (qp_id + 1) % opts.nb_qps;
                if (qp_id == 0)
                        cdev_index++;
                i++;
        }

        /* Get first size from range or list */
        if (opts.inc_buffer_size != 0)
                opts.test_buffer_size = opts.min_buffer_size;
        else
                opts.test_buffer_size = opts.buffer_size_list[0];

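        /*
         * For every buffer size in the range or list, launch the runner on
         * all slave lcores in use and wait for them to finish before moving
         * on to the next size.
         */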
        while (opts.test_buffer_size <= opts.max_buffer_size) {
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;

                        rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                ctx[i], lcore_id);
                        i++;
                }
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;
                        rte_eal_wait_lcore(lcore_id);
                        i++;
                }

                /* Get next size from range or list */
                if (opts.inc_buffer_size != 0)
                        opts.test_buffer_size += opts.inc_buffer_size;
                else {
                        if (++buffer_size_idx == opts.buffer_size_count)
                                break;
                        opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
                }
        }

        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);

        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_SUCCESS;

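/*
 * Error path: destroy any test contexts already created, stop the devices
 * that were started and release the test vector.
 */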
err:
        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (i == total_nb_qps)
                        break;

                cdev_id = enabled_cdevs[i];

                if (ctx[i] && cperf_testmap[opts.test].destructor)
                        cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);

        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_FAILURE;
}