lib/librte_cryptodev/rte_cryptodev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39 #include <rte_compat.h>
40 #include <rte_function_versioning.h>
41
42 #include "rte_crypto.h"
43 #include "rte_cryptodev.h"
44 #include "rte_cryptodev_pmd.h"
45 #include "rte_cryptodev_trace.h"
46
47 static uint8_t nb_drivers;
48
49 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
50
51 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
52
53 static struct rte_cryptodev_global cryptodev_globals = {
54                 .devs                   = rte_crypto_devices,
55                 .data                   = { NULL },
56                 .nb_devs                = 0
57 };
58
59 /* spinlock for crypto device callbacks */
60 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
61
62 static const struct rte_cryptodev_capabilities
63                 cryptodev_undefined_capabilities[] = {
64                 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
65 };
66
67 static struct rte_cryptodev_capabilities
68                 *capability_copy[RTE_CRYPTO_MAX_DEVS];
69 static uint8_t is_capability_checked[RTE_CRYPTO_MAX_DEVS];
70
71 /**
72  * The user application callback description.
73  *
74  * It contains the callback address registered by the user application,
75  * a pointer to the callback parameters, and the event type.
76  */
77 struct rte_cryptodev_callback {
78         TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
79         rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
80         void *cb_arg;                           /**< Parameter for callback */
81         enum rte_cryptodev_event_type event;    /**< Interrupt event type */
82         uint32_t active;                        /**< Callback is executing */
83 };
84
85 /**
86  * The crypto cipher algorithm string identifiers.
87  * They can be used in application command lines.
88  */
89 const char *
90 rte_crypto_cipher_algorithm_strings[] = {
91         [RTE_CRYPTO_CIPHER_3DES_CBC]    = "3des-cbc",
92         [RTE_CRYPTO_CIPHER_3DES_ECB]    = "3des-ecb",
93         [RTE_CRYPTO_CIPHER_3DES_CTR]    = "3des-ctr",
94
95         [RTE_CRYPTO_CIPHER_AES_CBC]     = "aes-cbc",
96         [RTE_CRYPTO_CIPHER_AES_CTR]     = "aes-ctr",
97         [RTE_CRYPTO_CIPHER_AES_DOCSISBPI]       = "aes-docsisbpi",
98         [RTE_CRYPTO_CIPHER_AES_ECB]     = "aes-ecb",
99         [RTE_CRYPTO_CIPHER_AES_F8]      = "aes-f8",
100         [RTE_CRYPTO_CIPHER_AES_XTS]     = "aes-xts",
101
102         [RTE_CRYPTO_CIPHER_ARC4]        = "arc4",
103
104         [RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
105         [RTE_CRYPTO_CIPHER_DES_DOCSISBPI]       = "des-docsisbpi",
106
107         [RTE_CRYPTO_CIPHER_NULL]        = "null",
108
109         [RTE_CRYPTO_CIPHER_KASUMI_F8]   = "kasumi-f8",
110         [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
111         [RTE_CRYPTO_CIPHER_ZUC_EEA3]    = "zuc-eea3"
112 };
113
114 /**
115  * The crypto cipher operation string identifiers.
116  * They can be used in application command lines.
117  */
118 const char *
119 rte_crypto_cipher_operation_strings[] = {
120                 [RTE_CRYPTO_CIPHER_OP_ENCRYPT]  = "encrypt",
121                 [RTE_CRYPTO_CIPHER_OP_DECRYPT]  = "decrypt"
122 };
123
124 /**
125  * The crypto auth algorithm string identifiers.
126  * They can be used in application command lines.
127  */
128 const char *
129 rte_crypto_auth_algorithm_strings[] = {
130         [RTE_CRYPTO_AUTH_AES_CBC_MAC]   = "aes-cbc-mac",
131         [RTE_CRYPTO_AUTH_AES_CMAC]      = "aes-cmac",
132         [RTE_CRYPTO_AUTH_AES_GMAC]      = "aes-gmac",
133         [RTE_CRYPTO_AUTH_AES_XCBC_MAC]  = "aes-xcbc-mac",
134
135         [RTE_CRYPTO_AUTH_MD5]           = "md5",
136         [RTE_CRYPTO_AUTH_MD5_HMAC]      = "md5-hmac",
137
138         [RTE_CRYPTO_AUTH_NULL]          = "null",
139
140         [RTE_CRYPTO_AUTH_SHA1]          = "sha1",
141         [RTE_CRYPTO_AUTH_SHA1_HMAC]     = "sha1-hmac",
142
143         [RTE_CRYPTO_AUTH_SHA224]        = "sha2-224",
144         [RTE_CRYPTO_AUTH_SHA224_HMAC]   = "sha2-224-hmac",
145         [RTE_CRYPTO_AUTH_SHA256]        = "sha2-256",
146         [RTE_CRYPTO_AUTH_SHA256_HMAC]   = "sha2-256-hmac",
147         [RTE_CRYPTO_AUTH_SHA384]        = "sha2-384",
148         [RTE_CRYPTO_AUTH_SHA384_HMAC]   = "sha2-384-hmac",
149         [RTE_CRYPTO_AUTH_SHA512]        = "sha2-512",
150         [RTE_CRYPTO_AUTH_SHA512_HMAC]   = "sha2-512-hmac",
151
152         [RTE_CRYPTO_AUTH_KASUMI_F9]     = "kasumi-f9",
153         [RTE_CRYPTO_AUTH_SNOW3G_UIA2]   = "snow3g-uia2",
154         [RTE_CRYPTO_AUTH_ZUC_EIA3]      = "zuc-eia3"
155 };
156
157 /**
158  * The crypto AEAD algorithm string identifiers.
159  * They can be used in application command lines.
160  */
161 const char *
162 rte_crypto_aead_algorithm_strings[] = {
163         [RTE_CRYPTO_AEAD_AES_CCM]       = "aes-ccm",
164         [RTE_CRYPTO_AEAD_AES_GCM]       = "aes-gcm",
165         [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
166 };
167
168 /**
169  * The crypto AEAD operation string identifiers.
170  * They can be used in application command lines.
171  */
172 const char *
173 rte_crypto_aead_operation_strings[] = {
174         [RTE_CRYPTO_AEAD_OP_ENCRYPT]    = "encrypt",
175         [RTE_CRYPTO_AEAD_OP_DECRYPT]    = "decrypt"
176 };
177
178 /**
179  * Asymmetric crypto transform operation strings identifiers.
180  */
181 const char *rte_crypto_asym_xform_strings[] = {
182         [RTE_CRYPTO_ASYM_XFORM_NONE]    = "none",
183         [RTE_CRYPTO_ASYM_XFORM_RSA]     = "rsa",
184         [RTE_CRYPTO_ASYM_XFORM_MODEX]   = "modexp",
185         [RTE_CRYPTO_ASYM_XFORM_MODINV]  = "modinv",
186         [RTE_CRYPTO_ASYM_XFORM_DH]      = "dh",
187         [RTE_CRYPTO_ASYM_XFORM_DSA]     = "dsa",
188         [RTE_CRYPTO_ASYM_XFORM_ECDSA]   = "ecdsa",
189         [RTE_CRYPTO_ASYM_XFORM_ECPM]    = "ecpm",
190 };
191
192 /**
193  * Asymmetric crypto operation strings identifiers.
194  */
195 const char *rte_crypto_asym_op_strings[] = {
196         [RTE_CRYPTO_ASYM_OP_ENCRYPT]    = "encrypt",
197         [RTE_CRYPTO_ASYM_OP_DECRYPT]    = "decrypt",
198         [RTE_CRYPTO_ASYM_OP_SIGN]       = "sign",
199         [RTE_CRYPTO_ASYM_OP_VERIFY]     = "verify",
200         [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]       = "priv_key_generate",
201         [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
202         [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
203 };
204
205 /**
206  * The private data structure stored in the session mempool private data.
207  */
208 struct rte_cryptodev_sym_session_pool_private_data {
209         uint16_t nb_drivers;
210         /**< number of elements in sess_data array */
211         uint16_t user_data_sz;
212         /**< session user data will be placed after sess_data */
213 };
214
215 int
216 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
217                 const char *algo_string)
218 {
219         unsigned int i;
220
221         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
222                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
223                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
224                         return 0;
225                 }
226         }
227
228         /* Invalid string */
229         return -1;
230 }
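
/*
 * Illustrative sketch (not part of the library): how an application can map
 * a user-supplied algorithm name onto the corresponding enum with the lookup
 * above. The sibling helpers for auth, AEAD and asymmetric xforms below
 * follow the same pattern. The variable names are hypothetical.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *	const char *user_arg = "aes-cbc";
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, user_arg) != 0) {
 *		printf("unknown cipher algorithm: %s\n", user_arg);
 *		return -EINVAL;
 *	}
 *	(algo now holds RTE_CRYPTO_CIPHER_AES_CBC)
 */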
231
232 int
233 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
234                 const char *algo_string)
235 {
236         unsigned int i;
237
238         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
239                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
240                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
241                         return 0;
242                 }
243         }
244
245         /* Invalid string */
246         return -1;
247 }
248
249 int
250 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
251                 const char *algo_string)
252 {
253         unsigned int i;
254
255         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
256                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
257                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
258                         return 0;
259                 }
260         }
261
262         /* Invalid string */
263         return -1;
264 }
265
266 int
267 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
268                 const char *xform_string)
269 {
270         unsigned int i;
271
272         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
273                 if (strcmp(xform_string,
274                         rte_crypto_asym_xform_strings[i]) == 0) {
275                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
276                         return 0;
277                 }
278         }
279
280         /* Invalid string */
281         return -1;
282 }
283
284 /**
285  * The crypto auth operation string identifiers.
286  * They can be used in application command lines.
287  */
288 const char *
289 rte_crypto_auth_operation_strings[] = {
290                 [RTE_CRYPTO_AUTH_OP_VERIFY]     = "verify",
291                 [RTE_CRYPTO_AUTH_OP_GENERATE]   = "generate"
292 };
293
294 const struct rte_cryptodev_symmetric_capability __vsym *
295 rte_cryptodev_sym_capability_get_v20(uint8_t dev_id,
296                 const struct rte_cryptodev_sym_capability_idx *idx)
297 {
298         const struct rte_cryptodev_capabilities *capability;
299         struct rte_cryptodev_info dev_info;
300         int i = 0;
301
302         rte_cryptodev_info_get_v20(dev_id, &dev_info);
303
304         while ((capability = &dev_info.capabilities[i++])->op !=
305                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
306                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
307                         continue;
308
309                 if (capability->sym.xform_type != idx->type)
310                         continue;
311
312                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
313                         capability->sym.auth.algo == idx->algo.auth)
314                         return &capability->sym;
315
316                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
317                         capability->sym.cipher.algo == idx->algo.cipher)
318                         return &capability->sym;
319
320                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
321                                 capability->sym.aead.algo == idx->algo.aead)
322                         return &capability->sym;
323         }
324
325         return NULL;
326 }
327 VERSION_SYMBOL(rte_cryptodev_sym_capability_get, _v20, 20.0);
328
329 const struct rte_cryptodev_symmetric_capability __vsym *
330 rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
331                 const struct rte_cryptodev_sym_capability_idx *idx)
332 {
333         const struct rte_cryptodev_capabilities *capability;
334         struct rte_cryptodev_info dev_info;
335         int i = 0;
336
337         rte_cryptodev_info_get(dev_id, &dev_info);
338
339         while ((capability = &dev_info.capabilities[i++])->op !=
340                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
341                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
342                         continue;
343
344                 if (capability->sym.xform_type != idx->type)
345                         continue;
346
347                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
348                         capability->sym.auth.algo == idx->algo.auth)
349                         return &capability->sym;
350
351                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
352                         capability->sym.cipher.algo == idx->algo.cipher)
353                         return &capability->sym;
354
355                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
356                                 capability->sym.aead.algo == idx->algo.aead)
357                         return &capability->sym;
358         }
359
360         return NULL;
361 }
362 MAP_STATIC_SYMBOL(const struct rte_cryptodev_symmetric_capability *
363                 rte_cryptodev_sym_capability_get(uint8_t dev_id,
364                 const struct rte_cryptodev_sym_capability_idx *idx),
365                 rte_cryptodev_sym_capability_get_v21);
366 BIND_DEFAULT_SYMBOL(rte_cryptodev_sym_capability_get, _v21, 21);
367
368 static int
369 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
370 {
371         unsigned int next_size;
372
373         /* Check lower/upper bounds */
374         if (size < range->min)
375                 return -1;
376
377         if (size > range->max)
378                 return -1;
379
380         /* If range is actually only one value, size is correct */
381         if (range->increment == 0)
382                 return 0;
383
384         /* Check if value is one of the supported sizes */
385         for (next_size = range->min; next_size <= range->max;
386                         next_size += range->increment)
387                 if (size == next_size)
388                         return 0;
389
390         return -1;
391 }
392
393 const struct rte_cryptodev_asymmetric_xform_capability *
394 rte_cryptodev_asym_capability_get(uint8_t dev_id,
395                 const struct rte_cryptodev_asym_capability_idx *idx)
396 {
397         const struct rte_cryptodev_capabilities *capability;
398         struct rte_cryptodev_info dev_info;
399         unsigned int i = 0;
400
401         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
402         rte_cryptodev_info_get(dev_id, &dev_info);
403
404         while ((capability = &dev_info.capabilities[i++])->op !=
405                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
406                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
407                         continue;
408
409                 if (capability->asym.xform_capa.xform_type == idx->type)
410                         return &capability->asym.xform_capa;
411         }
412         return NULL;
413 };
414
415 int
416 rte_cryptodev_sym_capability_check_cipher(
417                 const struct rte_cryptodev_symmetric_capability *capability,
418                 uint16_t key_size, uint16_t iv_size)
419 {
420         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
421                 return -1;
422
423         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
424                 return -1;
425
426         return 0;
427 }
428
429 int
430 rte_cryptodev_sym_capability_check_auth(
431                 const struct rte_cryptodev_symmetric_capability *capability,
432                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
433 {
434         if (param_range_check(key_size, &capability->auth.key_size) != 0)
435                 return -1;
436
437         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
438                 return -1;
439
440         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
441                 return -1;
442
443         return 0;
444 }
445
446 int
447 rte_cryptodev_sym_capability_check_aead(
448                 const struct rte_cryptodev_symmetric_capability *capability,
449                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
450                 uint16_t iv_size)
451 {
452         if (param_range_check(key_size, &capability->aead.key_size) != 0)
453                 return -1;
454
455         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
456                 return -1;
457
458         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
459                 return -1;
460
461         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
462                 return -1;
463
464         return 0;
465 }
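
/*
 * Illustrative sketch (not part of the library): a typical application flow
 * for validating AES-CBC parameters against a device before building a
 * session. The 16-byte key and IV sizes are example values.
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *	if (cap == NULL)
 *		return -ENOTSUP;
 *
 *	if (rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		return -ENOTSUP;
 */
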
466 int
467 rte_cryptodev_asym_xform_capability_check_optype(
468         const struct rte_cryptodev_asymmetric_xform_capability *capability,
469         enum rte_crypto_asym_op_type op_type)
470 {
471         if (capability->op_types & (1 << op_type))
472                 return 1;
473
474         return 0;
475 }
476
477 int
478 rte_cryptodev_asym_xform_capability_check_modlen(
479         const struct rte_cryptodev_asymmetric_xform_capability *capability,
480         uint16_t modlen)
481 {
482         /* no bound check needed when min or max is 0 */
483         if (capability->modlen.min != 0) {
484                 if (modlen < capability->modlen.min)
485                         return -1;
486         }
487
488         if (capability->modlen.max != 0) {
489                 if (modlen > capability->modlen.max)
490                         return -1;
491         }
492
493         /* in any case, check if the given modlen is a multiple of the increment */
494         if (capability->modlen.increment != 0) {
495                 if (modlen % (capability->modlen.increment))
496                         return -1;
497         }
498
499         return 0;
500 }
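
/*
 * Illustrative sketch (not part of the library): checking whether a device
 * supports RSA sign operations with a 256-byte (2048-bit) modulus. The
 * modulus length is an example value.
 *
 *	struct rte_cryptodev_asym_capability_idx asym_idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap;
 *
 *	cap = rte_cryptodev_asym_capability_get(dev_id, &asym_idx);
 *	if (cap == NULL)
 *		return -ENOTSUP;
 *	if (!rte_cryptodev_asym_xform_capability_check_optype(cap,
 *			RTE_CRYPTO_ASYM_OP_SIGN))
 *		return -ENOTSUP;
 *	if (rte_cryptodev_asym_xform_capability_check_modlen(cap, 256) != 0)
 *		return -ENOTSUP;
 */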
501
502
503 const char *
504 rte_cryptodev_get_feature_name(uint64_t flag)
505 {
506         switch (flag) {
507         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
508                 return "SYMMETRIC_CRYPTO";
509         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
510                 return "ASYMMETRIC_CRYPTO";
511         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
512                 return "SYM_OPERATION_CHAINING";
513         case RTE_CRYPTODEV_FF_CPU_SSE:
514                 return "CPU_SSE";
515         case RTE_CRYPTODEV_FF_CPU_AVX:
516                 return "CPU_AVX";
517         case RTE_CRYPTODEV_FF_CPU_AVX2:
518                 return "CPU_AVX2";
519         case RTE_CRYPTODEV_FF_CPU_AVX512:
520                 return "CPU_AVX512";
521         case RTE_CRYPTODEV_FF_CPU_AESNI:
522                 return "CPU_AESNI";
523         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
524                 return "HW_ACCELERATED";
525         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
526                 return "IN_PLACE_SGL";
527         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
528                 return "OOP_SGL_IN_SGL_OUT";
529         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
530                 return "OOP_SGL_IN_LB_OUT";
531         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
532                 return "OOP_LB_IN_SGL_OUT";
533         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
534                 return "OOP_LB_IN_LB_OUT";
535         case RTE_CRYPTODEV_FF_CPU_NEON:
536                 return "CPU_NEON";
537         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
538                 return "CPU_ARM_CE";
539         case RTE_CRYPTODEV_FF_SECURITY:
540                 return "SECURITY_PROTOCOL";
541         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
542                 return "RSA_PRIV_OP_KEY_EXP";
543         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
544                 return "RSA_PRIV_OP_KEY_QT";
545         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
546                 return "DIGEST_ENCRYPTED";
547         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
548                 return "SYM_CPU_CRYPTO";
549         case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
550                 return "ASYM_SESSIONLESS";
551         case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
552                 return "SYM_SESSIONLESS";
553         case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
554                 return "NON_BYTE_ALIGNED_DATA";
555         default:
556                 return NULL;
557         }
558 }
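
/*
 * Illustrative sketch (not part of the library): printing the feature flags
 * reported by a device, walking the 64-bit mask one bit at a time.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((info.feature_flags & flag) && name != NULL)
 *			printf("  %s\n", name);
 *	}
 */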
559
560 struct rte_cryptodev *
561 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
562 {
563         return &cryptodev_globals.devs[dev_id];
564 }
565
566 struct rte_cryptodev *
567 rte_cryptodev_pmd_get_named_dev(const char *name)
568 {
569         struct rte_cryptodev *dev;
570         unsigned int i;
571
572         if (name == NULL)
573                 return NULL;
574
575         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
576                 dev = &cryptodev_globals.devs[i];
577
578                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
579                                 (strcmp(dev->data->name, name) == 0))
580                         return dev;
581         }
582
583         return NULL;
584 }
585
586 static inline uint8_t
587 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
588 {
589         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
590                         rte_crypto_devices[dev_id].data == NULL)
591                 return 0;
592
593         return 1;
594 }
595
596 unsigned int
597 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
598 {
599         struct rte_cryptodev *dev = NULL;
600
601         if (!rte_cryptodev_is_valid_device_data(dev_id))
602                 return 0;
603
604         dev = rte_cryptodev_pmd_get_dev(dev_id);
605         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
606                 return 0;
607         else
608                 return 1;
609 }
610
611
612 int
613 rte_cryptodev_get_dev_id(const char *name)
614 {
615         unsigned i;
616
617         if (name == NULL)
618                 return -1;
619
620         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
621                 if (!rte_cryptodev_is_valid_device_data(i))
622                         continue;
623                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
624                                 == 0) &&
625                                 (cryptodev_globals.devs[i].attached ==
626                                                 RTE_CRYPTODEV_ATTACHED))
627                         return i;
628         }
629
630         return -1;
631 }
632
633 uint8_t
634 rte_cryptodev_count(void)
635 {
636         return cryptodev_globals.nb_devs;
637 }
638
639 uint8_t
640 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
641 {
642         uint8_t i, dev_count = 0;
643
644         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
645                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
646                         cryptodev_globals.devs[i].attached ==
647                                         RTE_CRYPTODEV_ATTACHED)
648                         dev_count++;
649
650         return dev_count;
651 }
652
653 uint8_t
654 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
655         uint8_t nb_devices)
656 {
657         uint8_t i, count = 0;
658         struct rte_cryptodev *devs = cryptodev_globals.devs;
659
660         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
661                 if (!rte_cryptodev_is_valid_device_data(i))
662                         continue;
663
664                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
665                         int cmp;
666
667                         cmp = strncmp(devs[i].device->driver->name,
668                                         driver_name,
669                                         strlen(driver_name) + 1);
670
671                         if (cmp == 0)
672                                 devices[count++] = devs[i].data->dev_id;
673                 }
674         }
675
676         return count;
677 }
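
/*
 * Illustrative sketch (not part of the library): collecting the IDs of all
 * attached devices that use one driver. "crypto_aesni_mb" is only an example
 * driver name.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb, i;
 *
 *	nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_CRYPTO_MAX_DEVS);
 *	for (i = 0; i < nb; i++)
 *		printf("dev %u is bound to this driver\n", ids[i]);
 */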
678
679 void *
680 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
681 {
682         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
683                         (rte_crypto_devices[dev_id].feature_flags &
684                         RTE_CRYPTODEV_FF_SECURITY))
685                 return rte_crypto_devices[dev_id].security_ctx;
686
687         return NULL;
688 }
689
690 int
691 rte_cryptodev_socket_id(uint8_t dev_id)
692 {
693         struct rte_cryptodev *dev;
694
695         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
696                 return -1;
697
698         dev = rte_cryptodev_pmd_get_dev(dev_id);
699
700         return dev->data->socket_id;
701 }
702
703 static inline int
704 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
705                 int socket_id)
706 {
707         char mz_name[RTE_MEMZONE_NAMESIZE];
708         const struct rte_memzone *mz;
709         int n;
710
711         /* generate memzone name */
712         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
713         if (n >= (int)sizeof(mz_name))
714                 return -EINVAL;
715
716         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
717                 mz = rte_memzone_reserve(mz_name,
718                                 sizeof(struct rte_cryptodev_data),
719                                 socket_id, 0);
720                 CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
721                                 mz_name, mz);
722         } else {
723                 mz = rte_memzone_lookup(mz_name);
724                 CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
725                                 mz_name, mz);
726         }
727
728         if (mz == NULL)
729                 return -ENOMEM;
730
731         *data = mz->addr;
732         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
733                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
734
735         return 0;
736 }
737
738 static inline int
739 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
740 {
741         char mz_name[RTE_MEMZONE_NAMESIZE];
742         const struct rte_memzone *mz;
743         int n;
744
745         /* generate memzone name */
746         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
747         if (n >= (int)sizeof(mz_name))
748                 return -EINVAL;
749
750         mz = rte_memzone_lookup(mz_name);
751         if (mz == NULL)
752                 return -ENOMEM;
753
754         RTE_ASSERT(*data == mz->addr);
755         *data = NULL;
756
757         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
758                 CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
759                                 mz_name, mz);
760                 return rte_memzone_free(mz);
761         } else {
762                 CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
763                                 mz_name, mz);
764         }
765
766         return 0;
767 }
768
769 static uint8_t
770 rte_cryptodev_find_free_device_index(void)
771 {
772         uint8_t dev_id;
773
774         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
775                 if (rte_crypto_devices[dev_id].attached ==
776                                 RTE_CRYPTODEV_DETACHED)
777                         return dev_id;
778         }
779         return RTE_CRYPTO_MAX_DEVS;
780 }
781
782 struct rte_cryptodev *
783 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
784 {
785         struct rte_cryptodev *cryptodev;
786         uint8_t dev_id;
787
788         if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
789                 CDEV_LOG_ERR("Crypto device with name %s already "
790                                 "allocated!", name);
791                 return NULL;
792         }
793
794         dev_id = rte_cryptodev_find_free_device_index();
795         if (dev_id == RTE_CRYPTO_MAX_DEVS) {
796                 CDEV_LOG_ERR("Reached maximum number of crypto devices");
797                 return NULL;
798         }
799
800         cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
801
802         if (cryptodev->data == NULL) {
803                 struct rte_cryptodev_data **cryptodev_data =
804                                 &cryptodev_globals.data[dev_id];
805
806                 int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
807                                 socket_id);
808
809                 if (retval < 0 || *cryptodev_data == NULL)
810                         return NULL;
811
812                 cryptodev->data = *cryptodev_data;
813
814                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
815                         strlcpy(cryptodev->data->name, name,
816                                 RTE_CRYPTODEV_NAME_MAX_LEN);
817
818                         cryptodev->data->dev_id = dev_id;
819                         cryptodev->data->socket_id = socket_id;
820                         cryptodev->data->dev_started = 0;
821                         CDEV_LOG_DEBUG("PRIMARY:init data");
822                 }
823
824                 CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
825                                 cryptodev->data->name,
826                                 cryptodev->data->dev_id,
827                                 cryptodev->data->socket_id,
828                                 cryptodev->data->dev_started);
829
830                 /* init user callbacks */
831                 TAILQ_INIT(&(cryptodev->link_intr_cbs));
832
833                 cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
834
835                 cryptodev_globals.nb_devs++;
836         }
837
838         return cryptodev;
839 }
840
841 int
842 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
843 {
844         int ret;
845         uint8_t dev_id;
846
847         if (cryptodev == NULL)
848                 return -EINVAL;
849
850         dev_id = cryptodev->data->dev_id;
851
852         /* Close device only if device operations have been set */
853         if (cryptodev->dev_ops) {
854                 ret = rte_cryptodev_close(dev_id);
855                 if (ret < 0)
856                         return ret;
857         }
858
859         ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
860         if (ret < 0)
861                 return ret;
862
863         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
864         cryptodev_globals.nb_devs--;
865         return 0;
866 }
867
868 uint16_t
869 rte_cryptodev_queue_pair_count(uint8_t dev_id)
870 {
871         struct rte_cryptodev *dev;
872
873         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
874                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
875                 return 0;
876         }
877
878         dev = &rte_crypto_devices[dev_id];
879         return dev->data->nb_queue_pairs;
880 }
881
882 static int
883 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
884                 int socket_id)
885 {
886         struct rte_cryptodev_info dev_info;
887         void **qp;
888         unsigned i;
889
890         if ((dev == NULL) || (nb_qpairs < 1)) {
891                 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
892                                                         dev, nb_qpairs);
893                 return -EINVAL;
894         }
895
896         CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
897                         nb_qpairs, dev->data->dev_id);
898
899         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
900
901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
902         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
903
904         if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
905                 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
906                                 nb_qpairs, dev->data->dev_id);
907                 return -EINVAL;
908         }
909
910         if (dev->data->queue_pairs == NULL) { /* first time configuration */
911                 dev->data->queue_pairs = rte_zmalloc_socket(
912                                 "cryptodev->queue_pairs",
913                                 sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
914                                 RTE_CACHE_LINE_SIZE, socket_id);
915
916                 if (dev->data->queue_pairs == NULL) {
917                         dev->data->nb_queue_pairs = 0;
918                         CDEV_LOG_ERR("failed to get memory for qp meta data, "
919                                                         "nb_queues %u",
920                                                         nb_qpairs);
921                         return -(ENOMEM);
922                 }
923         } else { /* re-configure */
924                 int ret;
925                 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
926
927                 qp = dev->data->queue_pairs;
928
929                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
930                                 -ENOTSUP);
931
932                 for (i = nb_qpairs; i < old_nb_queues; i++) {
933                         ret = (*dev->dev_ops->queue_pair_release)(dev, i);
934                         if (ret < 0)
935                                 return ret;
936                 }
937
938                 qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
939                                 RTE_CACHE_LINE_SIZE);
940                 if (qp == NULL) {
941                         CDEV_LOG_ERR("failed to realloc qp meta data,"
942                                                 " nb_queues %u", nb_qpairs);
943                         return -(ENOMEM);
944                 }
945
946                 if (nb_qpairs > old_nb_queues) {
947                         uint16_t new_qs = nb_qpairs - old_nb_queues;
948
949                         memset(qp + old_nb_queues, 0,
950                                 sizeof(qp[0]) * new_qs);
951                 }
952
953                 dev->data->queue_pairs = qp;
954
955         }
956         dev->data->nb_queue_pairs = nb_qpairs;
957         return 0;
958 }
959
960 int
961 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
962 {
963         struct rte_cryptodev *dev;
964         int diag;
965
966         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
967                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
968                 return -EINVAL;
969         }
970
971         dev = &rte_crypto_devices[dev_id];
972
973         if (dev->data->dev_started) {
974                 CDEV_LOG_ERR(
975                     "device %d must be stopped to allow configuration", dev_id);
976                 return -EBUSY;
977         }
978
979         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
980
981         /* Setup new number of queue pairs and reconfigure device. */
982         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
983                         config->socket_id);
984         if (diag != 0) {
985                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
986                                 dev_id, diag);
987                 return diag;
988         }
989
990         rte_cryptodev_trace_configure(dev_id, config);
991         return (*dev->dev_ops->dev_configure)(dev, config);
992 }
993
994
995 int
996 rte_cryptodev_start(uint8_t dev_id)
997 {
998         struct rte_cryptodev *dev;
999         int diag;
1000
1001         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1002
1003         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1004                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1005                 return -EINVAL;
1006         }
1007
1008         dev = &rte_crypto_devices[dev_id];
1009
1010         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1011
1012         if (dev->data->dev_started != 0) {
1013                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1014                         dev_id);
1015                 return 0;
1016         }
1017
1018         diag = (*dev->dev_ops->dev_start)(dev);
1019         rte_cryptodev_trace_start(dev_id, diag);
1020         if (diag == 0)
1021                 dev->data->dev_started = 1;
1022         else
1023                 return diag;
1024
1025         return 0;
1026 }
1027
1028 void
1029 rte_cryptodev_stop(uint8_t dev_id)
1030 {
1031         struct rte_cryptodev *dev;
1032
1033         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1034                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1035                 return;
1036         }
1037
1038         dev = &rte_crypto_devices[dev_id];
1039
1040         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1041
1042         if (dev->data->dev_started == 0) {
1043                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1044                         dev_id);
1045                 return;
1046         }
1047
1048         (*dev->dev_ops->dev_stop)(dev);
1049         rte_cryptodev_trace_stop(dev_id);
1050         dev->data->dev_started = 0;
1051 }
1052
1053 int
1054 rte_cryptodev_close(uint8_t dev_id)
1055 {
1056         struct rte_cryptodev *dev;
1057         int retval;
1058
1059         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1060                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1061                 return -1;
1062         }
1063
1064         dev = &rte_crypto_devices[dev_id];
1065
1066         /* Device must be stopped before it can be closed */
1067         if (dev->data->dev_started == 1) {
1068                 CDEV_LOG_ERR("Device %u must be stopped before closing",
1069                                 dev_id);
1070                 return -EBUSY;
1071         }
1072
1073         /* We can't close the device if there are outstanding sessions in use */
1074         if (dev->data->session_pool != NULL) {
1075                 if (!rte_mempool_full(dev->data->session_pool)) {
1076                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1077                                         "has sessions still in use, free "
1078                                         "all sessions before calling close",
1079                                         (unsigned)dev_id);
1080                         return -EBUSY;
1081                 }
1082         }
1083
1084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1085         retval = (*dev->dev_ops->dev_close)(dev);
1086         rte_cryptodev_trace_close(dev_id, retval);
1087
1088         if (capability_copy[dev_id]) {
1089                 free(capability_copy[dev_id]);
1090                 capability_copy[dev_id] = NULL;
1091         }
1092         is_capability_checked[dev_id] = 0;
1093
1094         if (retval < 0)
1095                 return retval;
1096
1097         return 0;
1098 }
1099
1100 int
1101 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1102 {
1103         struct rte_cryptodev *dev;
1104
1105         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1106                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1107                 return -EINVAL;
1108         }
1109
1110         dev = &rte_crypto_devices[dev_id];
1111         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1112                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1113                 return -EINVAL;
1114         }
1115         void **qps = dev->data->queue_pairs;
1116
1117         if (qps[queue_pair_id]) {
1118                 CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1119                         queue_pair_id, dev_id);
1120                 return 1;
1121         }
1122
1123         CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1124                 queue_pair_id, dev_id);
1125
1126         return 0;
1127 }
1128
1129 int
1130 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1131                 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1132
1133 {
1134         struct rte_cryptodev *dev;
1135
1136         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1137                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1138                 return -EINVAL;
1139         }
1140
1141         dev = &rte_crypto_devices[dev_id];
1142         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1143                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1144                 return -EINVAL;
1145         }
1146
1147         if (!qp_conf) {
1148                 CDEV_LOG_ERR("qp_conf cannot be NULL");
1149                 return -EINVAL;
1150         }
1151
1152         if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1153                         (!qp_conf->mp_session && qp_conf->mp_session_private)) {
1154                 CDEV_LOG_ERR("Invalid mempools");
1155                 return -EINVAL;
1156         }
1157
1158         if (qp_conf->mp_session) {
1159                 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1160                 uint32_t obj_size = qp_conf->mp_session->elt_size;
1161                 uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1162                 struct rte_cryptodev_sym_session s = {0};
1163
1164                 pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1165                 if (!pool_priv || qp_conf->mp_session->private_data_size <
1166                                 sizeof(*pool_priv)) {
1167                         CDEV_LOG_ERR("Invalid mempool");
1168                         return -EINVAL;
1169                 }
1170
1171                 s.nb_drivers = pool_priv->nb_drivers;
1172                 s.user_data_sz = pool_priv->user_data_sz;
1173
1174                 if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1175                         obj_size) || (s.nb_drivers <= dev->driver_id) ||
1176                         rte_cryptodev_sym_get_private_session_size(dev_id) >
1177                                 obj_priv_size) {
1178                         CDEV_LOG_ERR("Invalid mempool");
1179                         return -EINVAL;
1180                 }
1181         }
1182
1183         if (dev->data->dev_started) {
1184                 CDEV_LOG_ERR(
1185                     "device %d must be stopped to allow configuration", dev_id);
1186                 return -EBUSY;
1187         }
1188
1189         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1190
1191         rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1192         return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1193                         socket_id);
1194 }
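
/*
 * Illustrative sketch (not part of the library): the usual bring-up order an
 * application follows - configure the device, set up each queue pair, then
 * start it. The descriptor count is an example value, and mp_sess /
 * mp_sess_priv are assumed to be session mempools created beforehand (e.g.
 * with rte_cryptodev_sym_session_pool_create()).
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = mp_sess,
 *		.mp_session_private = mp_sess_priv,
 *	};
 *	uint16_t qp_id;
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 *	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *				conf.socket_id) < 0)
 *			return -1;
 *	if (rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */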
1195
1196
1197 int
1198 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1199 {
1200         struct rte_cryptodev *dev;
1201
1202         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1203                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1204                 return -ENODEV;
1205         }
1206
1207         if (stats == NULL) {
1208                 CDEV_LOG_ERR("Invalid stats ptr");
1209                 return -EINVAL;
1210         }
1211
1212         dev = &rte_crypto_devices[dev_id];
1213         memset(stats, 0, sizeof(*stats));
1214
1215         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1216         (*dev->dev_ops->stats_get)(dev, stats);
1217         return 0;
1218 }
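
/*
 * Illustrative sketch (not part of the library): sampling and printing a
 * device's enqueue/dequeue counters.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64
 *			" enq_err %" PRIu64 " deq_err %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count,
 *			stats.enqueue_err_count, stats.dequeue_err_count);
 */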
1219
1220 void
1221 rte_cryptodev_stats_reset(uint8_t dev_id)
1222 {
1223         struct rte_cryptodev *dev;
1224
1225         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1226                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1227                 return;
1228         }
1229
1230         dev = &rte_crypto_devices[dev_id];
1231
1232         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1233         (*dev->dev_ops->stats_reset)(dev);
1234 }
1235
1236 static void
1237 get_v20_capabilities(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1238 {
1239         const struct rte_cryptodev_capabilities *capability;
1240         uint8_t found_invalid_capa = 0;
1241         uint8_t counter = 0;
1242
1243         for (capability = dev_info->capabilities;
1244                         capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1245                         ++capability, ++counter) {
1246                 if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
1247                                 capability->sym.xform_type ==
1248                                         RTE_CRYPTO_SYM_XFORM_AEAD
1249                                 && capability->sym.aead.algo >=
1250                                 RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
1251                         found_invalid_capa = 1;
1252                         counter--;
1253                 }
1254         }
1255         is_capability_checked[dev_id] = 1;
1256         if (!found_invalid_capa)
1257                 return;
1258         capability_copy[dev_id] = malloc(counter *
1259                 sizeof(struct rte_cryptodev_capabilities));
1260         if (capability_copy[dev_id] == NULL) {
1261                  /*
1262                   * error case - no memory to store the trimmed
1263                   * list, so have to return an empty list
1264                   */
1265                 dev_info->capabilities =
1266                         cryptodev_undefined_capabilities;
1267                 is_capability_checked[dev_id] = 0;
1268         } else {
1269                 counter = 0;
1270                 for (capability = dev_info->capabilities;
1271                                 capability->op !=
1272                                 RTE_CRYPTO_OP_TYPE_UNDEFINED;
1273                                 capability++) {
1274                         if (!(capability->op ==
1275                                 RTE_CRYPTO_OP_TYPE_SYMMETRIC
1276                                 && capability->sym.xform_type ==
1277                                 RTE_CRYPTO_SYM_XFORM_AEAD
1278                                 && capability->sym.aead.algo >=
1279                                 RTE_CRYPTO_AEAD_CHACHA20_POLY1305)) {
1280                                 capability_copy[dev_id][counter++] =
1281                                                 *capability;
1282                         }
1283                 }
1284                 dev_info->capabilities =
1285                                 capability_copy[dev_id];
1286         }
1287 }
1288
1289 void __vsym
1290 rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1291 {
1292         struct rte_cryptodev *dev;
1293
1294         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1295                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1296                 return;
1297         }
1298
1299         dev = &rte_crypto_devices[dev_id];
1300
1301         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1302
1303         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1304         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1305
1306         if (capability_copy[dev_id] == NULL) {
1307                 if (!is_capability_checked[dev_id])
1308                         get_v20_capabilities(dev_id, dev_info);
1309         } else
1310                 dev_info->capabilities = capability_copy[dev_id];
1311
1312         dev_info->driver_name = dev->device->driver->name;
1313         dev_info->device = dev->device;
1314 }
1315 VERSION_SYMBOL(rte_cryptodev_info_get, _v20, 20.0);
1316
1317 void __vsym
1318 rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1319 {
1320         struct rte_cryptodev *dev;
1321
1322         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1323                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1324                 return;
1325         }
1326
1327         dev = &rte_crypto_devices[dev_id];
1328
1329         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1330
1331         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1332         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1333
1334         dev_info->driver_name = dev->device->driver->name;
1335         dev_info->device = dev->device;
1336 }
1337 MAP_STATIC_SYMBOL(void rte_cryptodev_info_get(uint8_t dev_id,
1338         struct rte_cryptodev_info *dev_info), rte_cryptodev_info_get_v21);
1339 BIND_DEFAULT_SYMBOL(rte_cryptodev_info_get, _v21, 21);
1340
1341 int
1342 rte_cryptodev_callback_register(uint8_t dev_id,
1343                         enum rte_cryptodev_event_type event,
1344                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1345 {
1346         struct rte_cryptodev *dev;
1347         struct rte_cryptodev_callback *user_cb;
1348
1349         if (!cb_fn)
1350                 return -EINVAL;
1351
1352         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1353                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1354                 return -EINVAL;
1355         }
1356
1357         dev = &rte_crypto_devices[dev_id];
1358         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1359
1360         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1361                 if (user_cb->cb_fn == cb_fn &&
1362                         user_cb->cb_arg == cb_arg &&
1363                         user_cb->event == event) {
1364                         break;
1365                 }
1366         }
1367
1368         /* create a new callback. */
1369         if (user_cb == NULL) {
1370                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1371                                 sizeof(struct rte_cryptodev_callback), 0);
1372                 if (user_cb != NULL) {
1373                         user_cb->cb_fn = cb_fn;
1374                         user_cb->cb_arg = cb_arg;
1375                         user_cb->event = event;
1376                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1377                 }
1378         }
1379
1380         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1381         return (user_cb == NULL) ? -ENOMEM : 0;
1382 }
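
/*
 * Illustrative sketch (not part of the library): registering an application
 * handler for device error events. The handler name is hypothetical.
 *
 *	static void
 *	app_crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type ev,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		printf("crypto dev %u raised event %d\n", dev_id, (int)ev);
 *	}
 *
 *	...
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			app_crypto_event_cb, NULL);
 */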
1383
1384 int
1385 rte_cryptodev_callback_unregister(uint8_t dev_id,
1386                         enum rte_cryptodev_event_type event,
1387                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1388 {
1389         int ret;
1390         struct rte_cryptodev *dev;
1391         struct rte_cryptodev_callback *cb, *next;
1392
1393         if (!cb_fn)
1394                 return -EINVAL;
1395
1396         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1397                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1398                 return -EINVAL;
1399         }
1400
1401         dev = &rte_crypto_devices[dev_id];
1402         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1403
1404         ret = 0;
1405         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1406
1407                 next = TAILQ_NEXT(cb, next);
1408
1409                 if (cb->cb_fn != cb_fn || cb->event != event ||
1410                                 (cb->cb_arg != (void *)-1 &&
1411                                 cb->cb_arg != cb_arg))
1412                         continue;
1413
1414                 /*
1415                  * if this callback is not executing right now,
1416                  * then remove it.
1417                  */
1418                 if (cb->active == 0) {
1419                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1420                         rte_free(cb);
1421                 } else {
1422                         ret = -EAGAIN;
1423                 }
1424         }
1425
1426         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1427         return ret;
1428 }
1429
1430 void
1431 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1432         enum rte_cryptodev_event_type event)
1433 {
1434         struct rte_cryptodev_callback *cb_lst;
1435         struct rte_cryptodev_callback dev_cb;
1436
1437         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1438         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1439                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1440                         continue;
1441                 dev_cb = *cb_lst;
1442                 cb_lst->active = 1;
1443                 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1444                 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1445                                                 dev_cb.cb_arg);
1446                 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1447                 cb_lst->active = 0;
1448         }
1449         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1450 }
1451
1452
1453 int
1454 rte_cryptodev_sym_session_init(uint8_t dev_id,
1455                 struct rte_cryptodev_sym_session *sess,
1456                 struct rte_crypto_sym_xform *xforms,
1457                 struct rte_mempool *mp)
1458 {
1459         struct rte_cryptodev *dev;
1460         uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1461                         dev_id);
1462         uint8_t index;
1463         int ret;
1464
1465         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1466                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1467                 return -EINVAL;
1468         }
1469
1470         dev = rte_cryptodev_pmd_get_dev(dev_id);
1471
1472         if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
1473                 return -EINVAL;
1474
1475         if (mp->elt_size < sess_priv_sz)
1476                 return -EINVAL;
1477
1478         index = dev->driver_id;
1479         if (index >= sess->nb_drivers)
1480                 return -EINVAL;
1481
1482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1483
1484         if (sess->sess_data[index].refcnt == 0) {
1485                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1486                                                         sess, mp);
1487                 if (ret < 0) {
1488                         CDEV_LOG_ERR(
1489                                 "dev_id %d failed to configure session details",
1490                                 dev_id);
1491                         return ret;
1492                 }
1493         }
1494
1495         rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1496         sess->sess_data[index].refcnt++;
1497         return 0;
1498 }
1499
1500 int
1501 rte_cryptodev_asym_session_init(uint8_t dev_id,
1502                 struct rte_cryptodev_asym_session *sess,
1503                 struct rte_crypto_asym_xform *xforms,
1504                 struct rte_mempool *mp)
1505 {
1506         struct rte_cryptodev *dev;
1507         uint8_t index;
1508         int ret;
1509
1510         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1511                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1512                 return -EINVAL;
1513         }
1514
1515         dev = rte_cryptodev_pmd_get_dev(dev_id);
1516
1517         if (sess == NULL || xforms == NULL || dev == NULL)
1518                 return -EINVAL;
1519
1520         index = dev->driver_id;
1521
1522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1523                                 -ENOTSUP);
1524
1525         if (sess->sess_private_data[index] == NULL) {
1526                 ret = dev->dev_ops->asym_session_configure(dev,
1527                                                         xforms,
1528                                                         sess, mp);
1529                 if (ret < 0) {
1530                         CDEV_LOG_ERR(
1531                                 "dev_id %d failed to configure session details",
1532                                 dev_id);
1533                         return ret;
1534                 }
1535         }
1536
1537         rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1538         return 0;
1539 }
1540
1541 struct rte_mempool *
1542 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1543         uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1544         int socket_id)
1545 {
1546         struct rte_mempool *mp;
1547         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1548         uint32_t obj_sz;
1549
1550         obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1551         if (obj_sz > elt_size)
1552                 CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1553                                 obj_sz);
1554         else
1555                 obj_sz = elt_size;
1556
1557         mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1558                         (uint32_t)(sizeof(*pool_priv)),
1559                         NULL, NULL, NULL, NULL,
1560                         socket_id, 0);
1561         if (mp == NULL) {
1562                 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1563                         __func__, name, rte_errno);
1564                 return NULL;
1565         }
1566
1567         pool_priv = rte_mempool_get_priv(mp);
1568         if (!pool_priv) {
1569                 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1570                         __func__, name);
1571                 rte_mempool_free(mp);
1572                 return NULL;
1573         }
1574
1575         pool_priv->nb_drivers = nb_drivers;
1576         pool_priv->user_data_sz = user_data_size;
1577
1578         rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1579                 elt_size, cache_size, user_data_size, mp);
1580         return mp;
1581 }
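
/*
 * Example: a minimal sketch of sizing the two mempools a symmetric session
 * typically needs: a header pool created by this function plus a plain
 * mempool for the driver private data.  Pool names, element counts and the
 * cache size below are illustrative only:
 *
 *	struct rte_mempool *sess_mp, *priv_mp;
 *	uint32_t hdr_sz = rte_cryptodev_sym_get_header_session_size();
 *	uint32_t priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *
 *	sess_mp = rte_cryptodev_sym_session_pool_create("sess_hdr_pool",
 *			1024, hdr_sz, 64, 0, rte_socket_id());
 *	priv_mp = rte_mempool_create("sess_priv_pool", 1024, priv_sz, 64, 0,
 *			NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 */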
1582
1583 static unsigned int
1584 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1585 {
1586         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1587                         sess->user_data_sz;
1588 }
1589
1590 static uint8_t
1591 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1592 {
1593         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1594
1595         if (!mp)
1596                 return 0;
1597
1598         pool_priv = rte_mempool_get_priv(mp);
1599
1600         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1601                         pool_priv->nb_drivers != nb_drivers ||
1602                         mp->elt_size <
1603                                 rte_cryptodev_sym_get_header_session_size()
1604                                 + pool_priv->user_data_sz)
1605                 return 0;
1606
1607         return 1;
1608 }
1609
1610 struct rte_cryptodev_sym_session *
1611 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1612 {
1613         struct rte_cryptodev_sym_session *sess;
1614         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1615
1616         if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1617                 CDEV_LOG_ERR("Invalid mempool\n");
1618                 return NULL;
1619         }
1620
1621         pool_priv = rte_mempool_get_priv(mp);
1622
1623         /* Allocate a session structure from the session pool */
1624         if (rte_mempool_get(mp, (void **)&sess)) {
1625                 CDEV_LOG_ERR("couldn't get object from session mempool");
1626                 return NULL;
1627         }
1628
1629         sess->nb_drivers = pool_priv->nb_drivers;
1630         sess->user_data_sz = pool_priv->user_data_sz;
1631         sess->opaque_data = 0;
1632
1633         /* Clear the per-driver session data for every registered driver,
1634          * including the trailing user data area.
1635          */
1636         memset(sess->sess_data, 0,
1637                         rte_cryptodev_sym_session_data_size(sess));
1638
1639         rte_cryptodev_trace_sym_session_create(mp, sess);
1640         return sess;
1641 }
1642
1643 struct rte_cryptodev_asym_session *
1644 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1645 {
1646         struct rte_cryptodev_asym_session *sess;
1647         unsigned int session_size =
1648                         rte_cryptodev_asym_get_header_session_size();
1649
1650         if (!mp) {
1651                 CDEV_LOG_ERR("invalid mempool\n");
1652                 return NULL;
1653         }
1654
1655         /* Verify that the provided mempool elements are big enough. */
1656         if (mp->elt_size < session_size) {
1657                 CDEV_LOG_ERR(
1658                         "mempool elements too small to hold session objects");
1659                 return NULL;
1660         }
1661
1662         /* Allocate a session structure from the session pool */
1663         if (rte_mempool_get(mp, (void **)&sess)) {
1664                 CDEV_LOG_ERR("couldn't get object from session mempool");
1665                 return NULL;
1666         }
1667
1668         /* Clear the per-driver private data pointers and the trailing
1669          * flag indicating presence of private data.
1670          */
1671         memset(sess, 0, session_size);
1672
1673         rte_cryptodev_trace_asym_session_create(mp, sess);
1674         return sess;
1675 }
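
/*
 * Example: a minimal sketch of the asymmetric session lifecycle, assuming a
 * mempool "asym_mp" whose elements can hold both the session header and the
 * driver private data, and an already filled "xform":
 *
 *	struct rte_cryptodev_asym_session *asess;
 *
 *	asess = rte_cryptodev_asym_session_create(asym_mp);
 *	if (asess != NULL &&
 *	    rte_cryptodev_asym_session_init(dev_id, asess, &xform, asym_mp) < 0) {
 *		rte_cryptodev_asym_session_free(asess);
 *		asess = NULL;
 *	}
 */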
1676
1677 int
1678 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1679                 struct rte_cryptodev_sym_session *sess)
1680 {
1681         struct rte_cryptodev *dev;
1682         uint8_t driver_id;
1683
1684         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1685                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1686                 return -EINVAL;
1687         }
1688
1689         dev = rte_cryptodev_pmd_get_dev(dev_id);
1690
1691         if (dev == NULL || sess == NULL)
1692                 return -EINVAL;
1693
1694         driver_id = dev->driver_id;
1695         if (sess->sess_data[driver_id].refcnt == 0)
1696                 return 0;
1697         if (--sess->sess_data[driver_id].refcnt != 0)
1698                 return -EBUSY;
1699
1700         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1701
1702         dev->dev_ops->sym_session_clear(dev, sess);
1703
1704         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1705         return 0;
1706 }
1707
1708 int
1709 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1710                 struct rte_cryptodev_asym_session *sess)
1711 {
1712         struct rte_cryptodev *dev;
1713
1714         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1715                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1716                 return -EINVAL;
1717         }
1718
1719         dev = rte_cryptodev_pmd_get_dev(dev_id);
1720
1721         if (dev == NULL || sess == NULL)
1722                 return -EINVAL;
1723
1724         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1725
1726         dev->dev_ops->asym_session_clear(dev, sess);
1727
1728         rte_cryptodev_trace_asym_session_clear(dev_id, sess);
1729         return 0;
1730 }
1731
1732 int
1733 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1734 {
1735         uint8_t i;
1736         struct rte_mempool *sess_mp;
1737
1738         if (sess == NULL)
1739                 return -EINVAL;
1740
1741         /* Check that all device private data has been freed */
1742         for (i = 0; i < sess->nb_drivers; i++) {
1743                 if (sess->sess_data[i].refcnt != 0)
1744                         return -EBUSY;
1745         }
1746
1747         /* Return session to mempool */
1748         sess_mp = rte_mempool_from_obj(sess);
1749         rte_mempool_put(sess_mp, sess);
1750
1751         rte_cryptodev_trace_sym_session_free(sess);
1752         return 0;
1753 }
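
/*
 * Example: the required teardown order.  Every device the session was
 * initialised on must be cleared before the session itself can be freed,
 * otherwise -EBUSY is returned:
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */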
1754
1755 int
1756 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1757 {
1758         uint8_t i;
1759         void *sess_priv;
1760         struct rte_mempool *sess_mp;
1761
1762         if (sess == NULL)
1763                 return -EINVAL;
1764
1765         /* Check that all device private data has been freed */
1766         for (i = 0; i < nb_drivers; i++) {
1767                 sess_priv = get_asym_session_private_data(sess, i);
1768                 if (sess_priv != NULL)
1769                         return -EBUSY;
1770         }
1771
1772         /* Return session to mempool */
1773         sess_mp = rte_mempool_from_obj(sess);
1774         rte_mempool_put(sess_mp, sess);
1775
1776         rte_cryptodev_trace_asym_session_free(sess);
1777         return 0;
1778 }
1779
1780 unsigned int
1781 rte_cryptodev_sym_get_header_session_size(void)
1782 {
1783         /*
1784          * Header contains pointers to the private data of all registered
1785          * drivers and all the information needed to safely clear
1786          * or free a session.
1787          */
1788         struct rte_cryptodev_sym_session s = {0};
1789
1790         s.nb_drivers = nb_drivers;
1791
1792         return (unsigned int)(sizeof(s) +
1793                         rte_cryptodev_sym_session_data_size(&s));
1794 }
1795
1796 unsigned int
1797 rte_cryptodev_sym_get_existing_header_session_size(
1798                 struct rte_cryptodev_sym_session *sess)
1799 {
1800         if (!sess)
1801                 return 0;
1802         else
1803                 return (unsigned int)(sizeof(*sess) +
1804                                 rte_cryptodev_sym_session_data_size(sess));
1805 }
1806
1807 unsigned int
1808 rte_cryptodev_asym_get_header_session_size(void)
1809 {
1810         /*
1811          * Header contains pointers to the private data
1812          * of all registered drivers, and a flag which
1813          * indicates presence of private data
1814          */
1815         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1816 }
1817
1818 unsigned int
1819 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1820 {
1821         struct rte_cryptodev *dev;
1822         unsigned int priv_sess_size;
1823
1824         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1825                 return 0;
1826
1827         dev = rte_cryptodev_pmd_get_dev(dev_id);
1828
1829         if (*dev->dev_ops->sym_session_get_size == NULL)
1830                 return 0;
1831
1832         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1833
1834         return priv_sess_size;
1835 }
1836
1837 unsigned int
1838 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1839 {
1840         struct rte_cryptodev *dev;
1841         unsigned int header_size = sizeof(void *) * nb_drivers;
1842         unsigned int priv_sess_size;
1843
1844         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1845                 return 0;
1846
1847         dev = rte_cryptodev_pmd_get_dev(dev_id);
1848
1849         if (*dev->dev_ops->asym_session_get_size == NULL)
1850                 return 0;
1851
1852         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1853         if (priv_sess_size < header_size)
1854                 return header_size;
1855
1856         return priv_sess_size;
1857
1858 }
1859
1860 int
1861 rte_cryptodev_sym_session_set_user_data(
1862                                         struct rte_cryptodev_sym_session *sess,
1863                                         void *data,
1864                                         uint16_t size)
1865 {
1866         if (sess == NULL)
1867                 return -EINVAL;
1868
1869         if (sess->user_data_sz < size)
1870                 return -ENOMEM;
1871
1872         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1873         return 0;
1874 }
1875
1876 void *
1877 rte_cryptodev_sym_session_get_user_data(
1878                                         struct rte_cryptodev_sym_session *sess)
1879 {
1880         if (sess == NULL || sess->user_data_sz == 0)
1881                 return NULL;
1882
1883         return (void *)(sess->sess_data + sess->nb_drivers);
1884 }
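
/*
 * Example: a minimal sketch of the user data round trip, assuming the
 * session mempool was created with a user_data_size of at least
 * sizeof(struct app_ctx), where "app_ctx" is a hypothetical
 * application-defined structure:
 *
 *	struct app_ctx ctx = { .flow_id = 7 };	(hypothetical contents)
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0)
 *		p = rte_cryptodev_sym_session_get_user_data(sess);
 */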
1885
1886 static inline void
1887 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
1888 {
1889         uint32_t i;
1890         for (i = 0; i < vec->num; i++)
1891                 vec->status[i] = errnum;
1892 }
1893
1894 uint32_t
1895 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1896         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1897         struct rte_crypto_sym_vec *vec)
1898 {
1899         struct rte_cryptodev *dev;
1900
1901         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1902                 sym_crypto_fill_status(vec, EINVAL);
1903                 return 0;
1904         }
1905
1906         dev = rte_cryptodev_pmd_get_dev(dev_id);
1907
1908         if (*dev->dev_ops->sym_cpu_process == NULL ||
1909                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
1910                 sym_crypto_fill_status(vec, ENOTSUP);
1911                 return 0;
1912         }
1913
1914         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1915 }
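
/*
 * Example: a minimal sketch of guarding the synchronous CPU path behind the
 * feature flag, assuming "sess", "ofs" and "vec" have already been prepared
 * by the application:
 *
 *	struct rte_cryptodev_info info;
 *	uint32_t done = 0;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)
 *		done = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *				ofs, &vec);
 */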
1916
1917 /** Initialise rte_crypto_op mempool element */
1918 static void
1919 rte_crypto_op_init(struct rte_mempool *mempool,
1920                 void *opaque_arg,
1921                 void *_op_data,
1922                 __rte_unused unsigned i)
1923 {
1924         struct rte_crypto_op *op = _op_data;
1925         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1926
1927         memset(_op_data, 0, mempool->elt_size);
1928
1929         __rte_crypto_op_reset(op, type);
1930
1931         op->phys_addr = rte_mem_virt2iova(_op_data);
1932         op->mempool = mempool;
1933 }
1934
1935
1936 struct rte_mempool *
1937 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1938                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1939                 int socket_id)
1940 {
1941         struct rte_crypto_op_pool_private *priv;
1942
1943         unsigned elt_size = sizeof(struct rte_crypto_op) +
1944                         priv_size;
1945
1946         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1947                 elt_size += sizeof(struct rte_crypto_sym_op);
1948         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1949                 elt_size += sizeof(struct rte_crypto_asym_op);
1950         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1951                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1952                                     sizeof(struct rte_crypto_asym_op));
1953         } else {
1954                 CDEV_LOG_ERR("Invalid op_type\n");
1955                 return NULL;
1956         }
1957
1958         /* lookup mempool in case already allocated */
1959         struct rte_mempool *mp = rte_mempool_lookup(name);
1960
1961         if (mp != NULL) {
1962                 priv = (struct rte_crypto_op_pool_private *)
1963                                 rte_mempool_get_priv(mp);
1964
1965                 if (mp->elt_size != elt_size ||
1966                                 mp->cache_size < cache_size ||
1967                                 mp->size < nb_elts ||
1968                                 priv->priv_size <  priv_size) {
1969                         mp = NULL;
1970                         CDEV_LOG_ERR("Mempool %s already exists but with "
1971                                         "incompatible parameters", name);
1972                         return NULL;
1973                 }
1974                 return mp;
1975         }
1976
1977         mp = rte_mempool_create(
1978                         name,
1979                         nb_elts,
1980                         elt_size,
1981                         cache_size,
1982                         sizeof(struct rte_crypto_op_pool_private),
1983                         NULL,
1984                         NULL,
1985                         rte_crypto_op_init,
1986                         &type,
1987                         socket_id,
1988                         0);
1989
1990         if (mp == NULL) {
1991                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1992                 return NULL;
1993         }
1994
1995         priv = (struct rte_crypto_op_pool_private *)
1996                         rte_mempool_get_priv(mp);
1997
1998         priv->priv_size = priv_size;
1999         priv->type = type;
2000
2001         return mp;
2002 }
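
/*
 * Example: a minimal sketch of creating a symmetric operation pool and
 * drawing one operation from it; the element count, cache size and private
 * size below are illustrative only:
 *
 *	struct rte_mempool *op_mp;
 *	struct rte_crypto_op *op;
 *
 *	op_mp = rte_crypto_op_pool_create("sym_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	op = rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */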
2003
2004 int
2005 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2006 {
2007         struct rte_cryptodev *dev = NULL;
2008         uint32_t i = 0;
2009
2010         if (name == NULL)
2011                 return -EINVAL;
2012
2013         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2014                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2015                                 "%s_%u", dev_name_prefix, i);
2016
2017                 if (ret < 0)
2018                         return ret;
2019
2020                 dev = rte_cryptodev_pmd_get_named_dev(name);
2021                 if (!dev)
2022                         return 0;
2023         }
2024
2025         return -1;
2026 }
2027
2028 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2029
2030 static struct cryptodev_driver_list cryptodev_driver_list =
2031         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2032
2033 int
2034 rte_cryptodev_driver_id_get(const char *name)
2035 {
2036         struct cryptodev_driver *driver;
2037         const char *driver_name;
2038
2039         if (name == NULL) {
2040                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2041                 return -1;
2042         }
2043
2044         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2045                 driver_name = driver->driver->name;
2046                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2047                         return driver->id;
2048         }
2049         return -1;
2050 }
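
/*
 * Example: resolving a driver identifier from its name ("crypto_null" is
 * just an illustrative driver name):
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_null");
 *	if (drv_id < 0)
 *		return -ENODEV;
 */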
2051
2052 const char *
2053 rte_cryptodev_name_get(uint8_t dev_id)
2054 {
2055         struct rte_cryptodev *dev;
2056
2057         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2058                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2059                 return NULL;
2060         }
2061
2062         dev = rte_cryptodev_pmd_get_dev(dev_id);
2063         if (dev == NULL)
2064                 return NULL;
2065
2066         return dev->data->name;
2067 }
2068
2069 const char *
2070 rte_cryptodev_driver_name_get(uint8_t driver_id)
2071 {
2072         struct cryptodev_driver *driver;
2073
2074         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2075                 if (driver->id == driver_id)
2076                         return driver->driver->name;
2077         return NULL;
2078 }
2079
2080 uint8_t
2081 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2082                 const struct rte_driver *drv)
2083 {
2084         crypto_drv->driver = drv;
2085         crypto_drv->id = nb_drivers;
2086
2087         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2088
2089         return nb_drivers++;
2090 }
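
/*
 * Example: PMDs normally reach this function through the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() constructor macro rather than calling it
 * directly; "example_crypto_drv", "example_pmd" and "example_driver_id" are
 * hypothetical names:
 *
 *	static struct cryptodev_driver example_crypto_drv;
 *	static uint8_t example_driver_id;
 *
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(example_crypto_drv, example_pmd.driver,
 *			example_driver_id);
 */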