cryptodev: fix ABI compatibility for ChaCha20-Poly1305
[dpdk.git] / lib / librte_cryptodev / rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39 #include <rte_compat.h>
40 #include <rte_function_versioning.h>
41
42 #include "rte_crypto.h"
43 #include "rte_cryptodev.h"
44 #include "rte_cryptodev_pmd.h"
45 #include "rte_cryptodev_trace.h"
46
/* Count of registered crypto drivers.
 * NOTE(review): not updated in this chunk — presumably maintained by the
 * driver registration code elsewhere in the file; confirm. */
static uint8_t nb_drivers;

/* Static table backing every crypto device instance. */
static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

/* Exported alias to the device table (part of the public ABI). */
struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

/* Global bookkeeping: device table, per-device shared data pointers and
 * the number of currently attached devices. */
static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* Capability list containing only the end-of-list marker; used as a
 * placeholder when no real capabilities are available. */
static const struct rte_cryptodev_capabilities
		cryptodev_undefined_capabilities[] = {
		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

/* Per-device capability copies plus a "conversion done" flag per device.
 * NOTE(review): used by the v20/v21 versioned capability-query paths below
 * for ABI compatibility — confirm against the info_get implementations. */
static struct rte_cryptodev_capabilities
		*capability_copy[RTE_CRYPTO_MAX_DEVS];
static uint8_t is_capability_checked[RTE_CRYPTO_MAX_DEVS];
70
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Nonzero while the callback is
						 * executing — NOTE(review):
						 * presumably guards against
						 * unregistering a running
						 * callback; confirm. */
};
84
/**
 * The crypto cipher algorithm strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_cipher_algorithm; entry 0 is unused (the
 * lookup helpers below scan from index 1).
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};
113
/**
 * The crypto cipher operation strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_cipher_operation.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};
123
/**
 * The crypto auth algorithm strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_auth_algorithm; entry 0 is unused (the
 * lookup helpers below scan from index 1).
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};
156
/**
 * The crypto AEAD algorithm strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_aead_algorithm; entry 0 is unused (the
 * lookup helpers below scan from index 1).
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};
167
/**
 * The crypto AEAD operation strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_aead_operation.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};
177
/**
 * Asymmetric crypto transform operation strings identifiers.
 *
 * Indexed by enum rte_crypto_asym_xform_type.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};
191
/**
 * Asymmetric crypto operation strings identifiers.
 *
 * Indexed by enum rte_crypto_asym_op_type.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};
204
/**
 * The private data structure stored in the session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};
214
215 int
216 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
217                 const char *algo_string)
218 {
219         unsigned int i;
220
221         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
222                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
223                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
224                         return 0;
225                 }
226         }
227
228         /* Invalid string */
229         return -1;
230 }
231
232 int
233 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
234                 const char *algo_string)
235 {
236         unsigned int i;
237
238         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
239                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
240                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
241                         return 0;
242                 }
243         }
244
245         /* Invalid string */
246         return -1;
247 }
248
249 int
250 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
251                 const char *algo_string)
252 {
253         unsigned int i;
254
255         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
256                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
257                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
258                         return 0;
259                 }
260         }
261
262         /* Invalid string */
263         return -1;
264 }
265
266 int
267 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
268                 const char *xform_string)
269 {
270         unsigned int i;
271
272         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
273                 if (strcmp(xform_string,
274                         rte_crypto_asym_xform_strings[i]) == 0) {
275                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
276                         return 0;
277                 }
278         }
279
280         /* Invalid string */
281         return -1;
282 }
283
/**
 * The crypto auth operation strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_auth_operation.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
293
/*
 * ABI 20.0 variant of rte_cryptodev_sym_capability_get(): scans the
 * capability array returned by the old-layout rte_cryptodev_info_get_v20()
 * for a symmetric capability matching @idx (auth, cipher or AEAD algo).
 * Returns the matching symmetric capability or NULL. Bound to the 20.0
 * ABI by the VERSION_SYMBOL macro below.
 */
const struct rte_cryptodev_symmetric_capability __vsym *
rte_cryptodev_sym_capability_get_v20(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get_v20(dev_id, &dev_info);

	/* The capability array is terminated by an UNDEFINED op entry */
	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
VERSION_SYMBOL(rte_cryptodev_sym_capability_get, _v20, 20.0);
328
/*
 * Current (ABI 21) variant of rte_cryptodev_sym_capability_get(): same
 * scan as the v20 variant but uses the current rte_cryptodev_info_get().
 * Returns the matching symmetric capability or NULL. Made the default
 * implementation by MAP_STATIC_SYMBOL/BIND_DEFAULT_SYMBOL below.
 */
const struct rte_cryptodev_symmetric_capability __vsym *
rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	/* The capability array is terminated by an UNDEFINED op entry */
	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
MAP_STATIC_SYMBOL(const struct rte_cryptodev_symmetric_capability *
		rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx),
		rte_cryptodev_sym_capability_get_v21);
BIND_DEFAULT_SYMBOL(rte_cryptodev_sym_capability_get, _v21, 21);
367
368 static int
369 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
370 {
371         unsigned int next_size;
372
373         /* Check lower/upper bounds */
374         if (size < range->min)
375                 return -1;
376
377         if (size > range->max)
378                 return -1;
379
380         /* If range is actually only one value, size is correct */
381         if (range->increment == 0)
382                 return 0;
383
384         /* Check if value is one of the supported sizes */
385         for (next_size = range->min; next_size <= range->max;
386                         next_size += range->increment)
387                 if (size == next_size)
388                         return 0;
389
390         return -1;
391 }
392
393 const struct rte_cryptodev_asymmetric_xform_capability *
394 rte_cryptodev_asym_capability_get(uint8_t dev_id,
395                 const struct rte_cryptodev_asym_capability_idx *idx)
396 {
397         const struct rte_cryptodev_capabilities *capability;
398         struct rte_cryptodev_info dev_info;
399         unsigned int i = 0;
400
401         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
402         rte_cryptodev_info_get(dev_id, &dev_info);
403
404         while ((capability = &dev_info.capabilities[i++])->op !=
405                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
406                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
407                         continue;
408
409                 if (capability->asym.xform_capa.xform_type == idx->type)
410                         return &capability->asym.xform_capa;
411         }
412         return NULL;
413 };
414
415 int
416 rte_cryptodev_sym_capability_check_cipher(
417                 const struct rte_cryptodev_symmetric_capability *capability,
418                 uint16_t key_size, uint16_t iv_size)
419 {
420         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
421                 return -1;
422
423         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
424                 return -1;
425
426         return 0;
427 }
428
429 int
430 rte_cryptodev_sym_capability_check_auth(
431                 const struct rte_cryptodev_symmetric_capability *capability,
432                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
433 {
434         if (param_range_check(key_size, &capability->auth.key_size) != 0)
435                 return -1;
436
437         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
438                 return -1;
439
440         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
441                 return -1;
442
443         return 0;
444 }
445
446 int
447 rte_cryptodev_sym_capability_check_aead(
448                 const struct rte_cryptodev_symmetric_capability *capability,
449                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
450                 uint16_t iv_size)
451 {
452         if (param_range_check(key_size, &capability->aead.key_size) != 0)
453                 return -1;
454
455         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
456                 return -1;
457
458         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
459                 return -1;
460
461         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
462                 return -1;
463
464         return 0;
465 }
466 int
467 rte_cryptodev_asym_xform_capability_check_optype(
468         const struct rte_cryptodev_asymmetric_xform_capability *capability,
469         enum rte_crypto_asym_op_type op_type)
470 {
471         if (capability->op_types & (1 << op_type))
472                 return 1;
473
474         return 0;
475 }
476
477 int
478 rte_cryptodev_asym_xform_capability_check_modlen(
479         const struct rte_cryptodev_asymmetric_xform_capability *capability,
480         uint16_t modlen)
481 {
482         /* no need to check for limits, if min or max = 0 */
483         if (capability->modlen.min != 0) {
484                 if (modlen < capability->modlen.min)
485                         return -1;
486         }
487
488         if (capability->modlen.max != 0) {
489                 if (modlen > capability->modlen.max)
490                         return -1;
491         }
492
493         /* in any case, check if given modlen is module increment */
494         if (capability->modlen.increment != 0) {
495                 if (modlen % (capability->modlen.increment))
496                         return -1;
497         }
498
499         return 0;
500 }
501
502
503 const char *
504 rte_cryptodev_get_feature_name(uint64_t flag)
505 {
506         switch (flag) {
507         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
508                 return "SYMMETRIC_CRYPTO";
509         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
510                 return "ASYMMETRIC_CRYPTO";
511         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
512                 return "SYM_OPERATION_CHAINING";
513         case RTE_CRYPTODEV_FF_CPU_SSE:
514                 return "CPU_SSE";
515         case RTE_CRYPTODEV_FF_CPU_AVX:
516                 return "CPU_AVX";
517         case RTE_CRYPTODEV_FF_CPU_AVX2:
518                 return "CPU_AVX2";
519         case RTE_CRYPTODEV_FF_CPU_AVX512:
520                 return "CPU_AVX512";
521         case RTE_CRYPTODEV_FF_CPU_AESNI:
522                 return "CPU_AESNI";
523         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
524                 return "HW_ACCELERATED";
525         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
526                 return "IN_PLACE_SGL";
527         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
528                 return "OOP_SGL_IN_SGL_OUT";
529         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
530                 return "OOP_SGL_IN_LB_OUT";
531         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
532                 return "OOP_LB_IN_SGL_OUT";
533         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
534                 return "OOP_LB_IN_LB_OUT";
535         case RTE_CRYPTODEV_FF_CPU_NEON:
536                 return "CPU_NEON";
537         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
538                 return "CPU_ARM_CE";
539         case RTE_CRYPTODEV_FF_SECURITY:
540                 return "SECURITY_PROTOCOL";
541         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
542                 return "RSA_PRIV_OP_KEY_EXP";
543         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
544                 return "RSA_PRIV_OP_KEY_QT";
545         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
546                 return "DIGEST_ENCRYPTED";
547         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
548                 return "SYM_CPU_CRYPTO";
549         case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
550                 return "ASYM_SESSIONLESS";
551         case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
552                 return "SYM_SESSIONLESS";
553         case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
554                 return "NON_BYTE_ALIGNED_DATA";
555         default:
556                 return NULL;
557         }
558 }
559
560 struct rte_cryptodev *
561 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
562 {
563         return &cryptodev_globals.devs[dev_id];
564 }
565
566 struct rte_cryptodev *
567 rte_cryptodev_pmd_get_named_dev(const char *name)
568 {
569         struct rte_cryptodev *dev;
570         unsigned int i;
571
572         if (name == NULL)
573                 return NULL;
574
575         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
576                 dev = &cryptodev_globals.devs[i];
577
578                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
579                                 (strcmp(dev->data->name, name) == 0))
580                         return dev;
581         }
582
583         return NULL;
584 }
585
586 static inline uint8_t
587 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
588 {
589         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
590                         rte_crypto_devices[dev_id].data == NULL)
591                 return 0;
592
593         return 1;
594 }
595
596 unsigned int
597 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
598 {
599         struct rte_cryptodev *dev = NULL;
600
601         if (!rte_cryptodev_is_valid_device_data(dev_id))
602                 return 0;
603
604         dev = rte_cryptodev_pmd_get_dev(dev_id);
605         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
606                 return 0;
607         else
608                 return 1;
609 }
610
611
612 int
613 rte_cryptodev_get_dev_id(const char *name)
614 {
615         unsigned i;
616
617         if (name == NULL)
618                 return -1;
619
620         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
621                 if (!rte_cryptodev_is_valid_device_data(i))
622                         continue;
623                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
624                                 == 0) &&
625                                 (cryptodev_globals.devs[i].attached ==
626                                                 RTE_CRYPTODEV_ATTACHED))
627                         return i;
628         }
629
630         return -1;
631 }
632
633 uint8_t
634 rte_cryptodev_count(void)
635 {
636         return cryptodev_globals.nb_devs;
637 }
638
639 uint8_t
640 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
641 {
642         uint8_t i, dev_count = 0;
643
644         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
645                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
646                         cryptodev_globals.devs[i].attached ==
647                                         RTE_CRYPTODEV_ATTACHED)
648                         dev_count++;
649
650         return dev_count;
651 }
652
653 uint8_t
654 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
655         uint8_t nb_devices)
656 {
657         uint8_t i, count = 0;
658         struct rte_cryptodev *devs = cryptodev_globals.devs;
659
660         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
661                 if (!rte_cryptodev_is_valid_device_data(i))
662                         continue;
663
664                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
665                         int cmp;
666
667                         cmp = strncmp(devs[i].device->driver->name,
668                                         driver_name,
669                                         strlen(driver_name) + 1);
670
671                         if (cmp == 0)
672                                 devices[count++] = devs[i].data->dev_id;
673                 }
674         }
675
676         return count;
677 }
678
679 void *
680 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
681 {
682         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
683                         (rte_crypto_devices[dev_id].feature_flags &
684                         RTE_CRYPTODEV_FF_SECURITY))
685                 return rte_crypto_devices[dev_id].security_ctx;
686
687         return NULL;
688 }
689
690 int
691 rte_cryptodev_socket_id(uint8_t dev_id)
692 {
693         struct rte_cryptodev *dev;
694
695         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
696                 return -1;
697
698         dev = rte_cryptodev_pmd_get_dev(dev_id);
699
700         return dev->data->socket_id;
701 }
702
/*
 * Obtain the shared rte_cryptodev_data for @dev_id through *data.
 *
 * Primary process: reserves a new memzone on @socket_id and zeroes it.
 * Secondary process: attaches to the memzone created by the primary.
 * Returns 0 on success, -EINVAL if the memzone name would not fit, or
 * -ENOMEM if the memzone cannot be reserved/found.
 */
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	/* A fresh allocation in the primary starts out zeroed */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}
732
/*
 * Release the shared rte_cryptodev_data for @dev_id and clear *data.
 *
 * The backing memzone is only freed in the primary process; a secondary
 * just drops its pointer. Returns 0 on success, -EINVAL if the memzone
 * name would not fit, -ENOMEM if the memzone is not found.
 */
static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	/* *data must be the address obtained from rte_cryptodev_data_alloc */
	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_memzone_free(mz);

	return 0;
}
757
758 static uint8_t
759 rte_cryptodev_find_free_device_index(void)
760 {
761         uint8_t dev_id;
762
763         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
764                 if (rte_crypto_devices[dev_id].attached ==
765                                 RTE_CRYPTODEV_DETACHED)
766                         return dev_id;
767         }
768         return RTE_CRYPTO_MAX_DEVS;
769 }
770
/*
 * Reserve a free device slot named @name and set up its shared data.
 *
 * The primary process creates and populates the per-device shared-data
 * memzone; a secondary process attaches to the existing one. Returns the
 * device, or NULL when the name is already taken, the table is full, or
 * the shared data cannot be allocated/found.
 */
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		/* Allocate (primary) or look up (secondary) shared data */
		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		/* Only the primary process writes the identity fields */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
822
/*
 * Release a device obtained from rte_cryptodev_pmd_allocate().
 *
 * Closes the device first (only when dev_ops were ever set), frees its
 * shared-data memzone, then marks the slot detached. Returns 0 on
 * success or a negative errno; on error the device stays attached.
 */
int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}
849
850 uint16_t
851 rte_cryptodev_queue_pair_count(uint8_t dev_id)
852 {
853         struct rte_cryptodev *dev;
854
855         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
856                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
857                 return 0;
858         }
859
860         dev = &rte_crypto_devices[dev_id];
861         return dev->data->nb_queue_pairs;
862 }
863
/*
 * (Re)size the queue-pair pointer array of @dev to hold @nb_qpairs
 * entries.
 *
 * First-time configuration zmallocs the array on @socket_id. On
 * reconfiguration, queue pairs beyond the new count are released via the
 * PMD, the array is realloc'd, and any newly added tail entries are
 * zeroed. Updates dev->data->nb_queue_pairs on success. Returns 0, or a
 * negative errno (-EINVAL, -ENOTSUP, -ENOMEM, or a PMD release error).
 */
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	/* Requested count must not exceed what the PMD advertises */
	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
	    return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		/* When shrinking, release the queue pairs being dropped */
		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		/* When growing, the new tail entries start out NULL */
		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
941
942 int
943 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
944 {
945         struct rte_cryptodev *dev;
946         int diag;
947
948         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
949                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
950                 return -EINVAL;
951         }
952
953         dev = &rte_crypto_devices[dev_id];
954
955         if (dev->data->dev_started) {
956                 CDEV_LOG_ERR(
957                     "device %d must be stopped to allow configuration", dev_id);
958                 return -EBUSY;
959         }
960
961         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
962
963         /* Setup new number of queue pairs and reconfigure device. */
964         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
965                         config->socket_id);
966         if (diag != 0) {
967                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
968                                 dev_id, diag);
969                 return diag;
970         }
971
972         rte_cryptodev_trace_configure(dev_id, config);
973         return (*dev->dev_ops->dev_configure)(dev, config);
974 }
975
976
977 int
978 rte_cryptodev_start(uint8_t dev_id)
979 {
980         struct rte_cryptodev *dev;
981         int diag;
982
983         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
984
985         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
986                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
987                 return -EINVAL;
988         }
989
990         dev = &rte_crypto_devices[dev_id];
991
992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
993
994         if (dev->data->dev_started != 0) {
995                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
996                         dev_id);
997                 return 0;
998         }
999
1000         diag = (*dev->dev_ops->dev_start)(dev);
1001         rte_cryptodev_trace_start(dev_id, diag);
1002         if (diag == 0)
1003                 dev->data->dev_started = 1;
1004         else
1005                 return diag;
1006
1007         return 0;
1008 }
1009
void
rte_cryptodev_stop(uint8_t dev_id)
{
	/*
	 * Stop the device. Invalid device ids and already-stopped devices
	 * are logged and ignored; the started flag is cleared only after
	 * the PMD's stop handler has returned.
	 */
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	/* Stopping a stopped device is a no-op (logged). */
	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}
1034
1035 int
1036 rte_cryptodev_close(uint8_t dev_id)
1037 {
1038         struct rte_cryptodev *dev;
1039         int retval;
1040
1041         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1042                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1043                 return -1;
1044         }
1045
1046         dev = &rte_crypto_devices[dev_id];
1047
1048         /* Device must be stopped before it can be closed */
1049         if (dev->data->dev_started == 1) {
1050                 CDEV_LOG_ERR("Device %u must be stopped before closing",
1051                                 dev_id);
1052                 return -EBUSY;
1053         }
1054
1055         /* We can't close the device if there are outstanding sessions in use */
1056         if (dev->data->session_pool != NULL) {
1057                 if (!rte_mempool_full(dev->data->session_pool)) {
1058                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1059                                         "has sessions still in use, free "
1060                                         "all sessions before calling close",
1061                                         (unsigned)dev_id);
1062                         return -EBUSY;
1063                 }
1064         }
1065
1066         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1067         retval = (*dev->dev_ops->dev_close)(dev);
1068         rte_cryptodev_trace_close(dev_id, retval);
1069
1070         if (capability_copy[dev_id]) {
1071                 free(capability_copy[dev_id]);
1072                 capability_copy[dev_id] = NULL;
1073         }
1074         is_capability_checked[dev_id] = 0;
1075
1076         if (retval < 0)
1077                 return retval;
1078
1079         return 0;
1080 }
1081
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)

{
	/*
	 * Validate the queue-pair configuration (including the optional
	 * session mempools) and delegate the actual setup to the PMD.
	 * The device must be stopped. Returns 0 on success or a negative
	 * errno on invalid parameters / busy device.
	 */
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	/* Session mempools come as a pair: both set or both unset. */
	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		/* The pool must carry cryptodev pool-private data. */
		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		/* Elements must fit the session header and this device's
		 * private session data, and the pool must have a slot for
		 * this device's driver id.
		 */
		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
1148
1149
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	/*
	 * Fill *stats from the PMD. The structure is zeroed first so any
	 * fields the PMD does not populate read as zero. Returns 0 on
	 * success, -ENODEV for a bad dev_id, -EINVAL for a NULL stats
	 * pointer, -ENOTSUP if the PMD has no stats handler.
	 */
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}
1172
void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	/*
	 * Reset the device's statistics counters via the PMD. Invalid
	 * device ids and PMDs without a reset handler are silently
	 * ignored (the former is logged).
	 */
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
1188
1189 static void
1190 get_v20_capabilities(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1191 {
1192         const struct rte_cryptodev_capabilities *capability;
1193         uint8_t found_invalid_capa = 0;
1194         uint8_t counter = 0;
1195
1196         for (capability = dev_info->capabilities;
1197                         capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1198                         ++capability, ++counter) {
1199                 if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
1200                                 capability->sym.xform_type ==
1201                                         RTE_CRYPTO_SYM_XFORM_AEAD
1202                                 && capability->sym.aead.algo >=
1203                                 RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
1204                         found_invalid_capa = 1;
1205                         counter--;
1206                 }
1207         }
1208         is_capability_checked[dev_id] = 1;
1209         if (!found_invalid_capa)
1210                 return;
1211         capability_copy[dev_id] = malloc(counter *
1212                 sizeof(struct rte_cryptodev_capabilities));
1213         if (capability_copy[dev_id] == NULL) {
1214                  /*
1215                   * error case - no memory to store the trimmed
1216                   * list, so have to return an empty list
1217                   */
1218                 dev_info->capabilities =
1219                         cryptodev_undefined_capabilities;
1220                 is_capability_checked[dev_id] = 0;
1221         } else {
1222                 counter = 0;
1223                 for (capability = dev_info->capabilities;
1224                                 capability->op !=
1225                                 RTE_CRYPTO_OP_TYPE_UNDEFINED;
1226                                 capability++) {
1227                         if (!(capability->op ==
1228                                 RTE_CRYPTO_OP_TYPE_SYMMETRIC
1229                                 && capability->sym.xform_type ==
1230                                 RTE_CRYPTO_SYM_XFORM_AEAD
1231                                 && capability->sym.aead.algo >=
1232                                 RTE_CRYPTO_AEAD_CHACHA20_POLY1305)) {
1233                                 capability_copy[dev_id][counter++] =
1234                                                 *capability;
1235                         }
1236                 }
1237                 dev_info->capabilities =
1238                                 capability_copy[dev_id];
1239         }
1240 }
1241
void __vsym
rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	/*
	 * v20.0-ABI-compatible variant of rte_cryptodev_info_get(): after
	 * querying the PMD, substitute the cached capability list with
	 * post-v20 AEAD algorithms filtered out (built lazily by
	 * get_v20_capabilities() on first call per device).
	 */
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	/* Use the cached filtered list if present; otherwise build it
	 * once (no-op re-checks are skipped via is_capability_checked).
	 */
	if (capability_copy[dev_id] == NULL) {
		if (!is_capability_checked[dev_id])
			get_v20_capabilities(dev_id, dev_info);
	} else
		dev_info->capabilities = capability_copy[dev_id];

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
VERSION_SYMBOL(rte_cryptodev_info_get, _v20, 20.0);
1269
void __vsym
rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	/*
	 * Current (v21) rte_cryptodev_info_get(): pass the PMD's
	 * capability list through unmodified, unlike the v20 variant.
	 */
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
/* The v21 implementation is the default binding for new links. */
MAP_STATIC_SYMBOL(void rte_cryptodev_info_get(uint8_t dev_id,
	struct rte_cryptodev_info *dev_info), rte_cryptodev_info_get_v21);
BIND_DEFAULT_SYMBOL(rte_cryptodev_info_get, _v21, 21);
1293
1294 int
1295 rte_cryptodev_callback_register(uint8_t dev_id,
1296                         enum rte_cryptodev_event_type event,
1297                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1298 {
1299         struct rte_cryptodev *dev;
1300         struct rte_cryptodev_callback *user_cb;
1301
1302         if (!cb_fn)
1303                 return -EINVAL;
1304
1305         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1306                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1307                 return -EINVAL;
1308         }
1309
1310         dev = &rte_crypto_devices[dev_id];
1311         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1312
1313         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1314                 if (user_cb->cb_fn == cb_fn &&
1315                         user_cb->cb_arg == cb_arg &&
1316                         user_cb->event == event) {
1317                         break;
1318                 }
1319         }
1320
1321         /* create a new callback. */
1322         if (user_cb == NULL) {
1323                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1324                                 sizeof(struct rte_cryptodev_callback), 0);
1325                 if (user_cb != NULL) {
1326                         user_cb->cb_fn = cb_fn;
1327                         user_cb->cb_arg = cb_arg;
1328                         user_cb->event = event;
1329                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1330                 }
1331         }
1332
1333         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1334         return (user_cb == NULL) ? -ENOMEM : 0;
1335 }
1336
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	/*
	 * Remove registrations matching (cb_fn, event, cb_arg). An entry
	 * whose stored cb_arg is (void *)-1 matches regardless of the
	 * cb_arg passed here. Returns 0 on success, -EINVAL on bad
	 * parameters, or -EAGAIN if a matching callback was executing at
	 * the time and therefore could not be removed.
	 */
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	/* Walk with an explicit 'next' since entries may be freed. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
1382
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	/*
	 * Invoke every callback registered for 'event' on this device.
	 * Each matching entry is copied and marked 'active', and the list
	 * lock is released while the user callback runs; unregister sees
	 * the active flag and returns -EAGAIN rather than freeing an
	 * entry that is mid-execution.
	 */
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Call through a local copy: the lock is dropped below. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
1404
1405
1406 int
1407 rte_cryptodev_sym_session_init(uint8_t dev_id,
1408                 struct rte_cryptodev_sym_session *sess,
1409                 struct rte_crypto_sym_xform *xforms,
1410                 struct rte_mempool *mp)
1411 {
1412         struct rte_cryptodev *dev;
1413         uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1414                         dev_id);
1415         uint8_t index;
1416         int ret;
1417
1418         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1419                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1420                 return -EINVAL;
1421         }
1422
1423         dev = rte_cryptodev_pmd_get_dev(dev_id);
1424
1425         if (sess == NULL || xforms == NULL || dev == NULL)
1426                 return -EINVAL;
1427
1428         if (mp->elt_size < sess_priv_sz)
1429                 return -EINVAL;
1430
1431         index = dev->driver_id;
1432         if (index >= sess->nb_drivers)
1433                 return -EINVAL;
1434
1435         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1436
1437         if (sess->sess_data[index].refcnt == 0) {
1438                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1439                                                         sess, mp);
1440                 if (ret < 0) {
1441                         CDEV_LOG_ERR(
1442                                 "dev_id %d failed to configure session details",
1443                                 dev_id);
1444                         return ret;
1445                 }
1446         }
1447
1448         rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1449         sess->sess_data[index].refcnt++;
1450         return 0;
1451 }
1452
1453 int
1454 rte_cryptodev_asym_session_init(uint8_t dev_id,
1455                 struct rte_cryptodev_asym_session *sess,
1456                 struct rte_crypto_asym_xform *xforms,
1457                 struct rte_mempool *mp)
1458 {
1459         struct rte_cryptodev *dev;
1460         uint8_t index;
1461         int ret;
1462
1463         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1464                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1465                 return -EINVAL;
1466         }
1467
1468         dev = rte_cryptodev_pmd_get_dev(dev_id);
1469
1470         if (sess == NULL || xforms == NULL || dev == NULL)
1471                 return -EINVAL;
1472
1473         index = dev->driver_id;
1474
1475         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1476                                 -ENOTSUP);
1477
1478         if (sess->sess_private_data[index] == NULL) {
1479                 ret = dev->dev_ops->asym_session_configure(dev,
1480                                                         xforms,
1481                                                         sess, mp);
1482                 if (ret < 0) {
1483                         CDEV_LOG_ERR(
1484                                 "dev_id %d failed to configure session details",
1485                                 dev_id);
1486                         return ret;
1487                 }
1488         }
1489
1490         rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1491         return 0;
1492 }
1493
1494 struct rte_mempool *
1495 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1496         uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1497         int socket_id)
1498 {
1499         struct rte_mempool *mp;
1500         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1501         uint32_t obj_sz;
1502
1503         obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1504         if (obj_sz > elt_size)
1505                 CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1506                                 obj_sz);
1507         else
1508                 obj_sz = elt_size;
1509
1510         mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1511                         (uint32_t)(sizeof(*pool_priv)),
1512                         NULL, NULL, NULL, NULL,
1513                         socket_id, 0);
1514         if (mp == NULL) {
1515                 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1516                         __func__, name, rte_errno);
1517                 return NULL;
1518         }
1519
1520         pool_priv = rte_mempool_get_priv(mp);
1521         if (!pool_priv) {
1522                 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1523                         __func__, name);
1524                 rte_mempool_free(mp);
1525                 return NULL;
1526         }
1527
1528         pool_priv->nb_drivers = nb_drivers;
1529         pool_priv->user_data_sz = user_data_size;
1530
1531         rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1532                 elt_size, cache_size, user_data_size, mp);
1533         return mp;
1534 }
1535
1536 static unsigned int
1537 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1538 {
1539         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1540                         sess->user_data_sz;
1541 }
1542
1543 struct rte_cryptodev_sym_session *
1544 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1545 {
1546         struct rte_cryptodev_sym_session *sess;
1547         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1548
1549         if (!mp) {
1550                 CDEV_LOG_ERR("Invalid mempool\n");
1551                 return NULL;
1552         }
1553
1554         pool_priv = rte_mempool_get_priv(mp);
1555
1556         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
1557                 CDEV_LOG_ERR("Invalid mempool\n");
1558                 return NULL;
1559         }
1560
1561         /* Allocate a session structure from the session pool */
1562         if (rte_mempool_get(mp, (void **)&sess)) {
1563                 CDEV_LOG_ERR("couldn't get object from session mempool");
1564                 return NULL;
1565         }
1566
1567         sess->nb_drivers = pool_priv->nb_drivers;
1568         sess->user_data_sz = pool_priv->user_data_sz;
1569         sess->opaque_data = 0;
1570
1571         /* Clear device session pointer.
1572          * Include the flag indicating presence of user data
1573          */
1574         memset(sess->sess_data, 0,
1575                         rte_cryptodev_sym_session_data_size(sess));
1576
1577         rte_cryptodev_trace_sym_session_create(mp, sess);
1578         return sess;
1579 }
1580
1581 struct rte_cryptodev_asym_session *
1582 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1583 {
1584         struct rte_cryptodev_asym_session *sess;
1585
1586         /* Allocate a session structure from the session pool */
1587         if (rte_mempool_get(mp, (void **)&sess)) {
1588                 CDEV_LOG_ERR("couldn't get object from session mempool");
1589                 return NULL;
1590         }
1591
1592         /* Clear device session pointer.
1593          * Include the flag indicating presence of private data
1594          */
1595         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1596
1597         rte_cryptodev_trace_asym_session_create(mp, sess);
1598         return sess;
1599 }
1600
1601 int
1602 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1603                 struct rte_cryptodev_sym_session *sess)
1604 {
1605         struct rte_cryptodev *dev;
1606         uint8_t driver_id;
1607
1608         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1609                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1610                 return -EINVAL;
1611         }
1612
1613         dev = rte_cryptodev_pmd_get_dev(dev_id);
1614
1615         if (dev == NULL || sess == NULL)
1616                 return -EINVAL;
1617
1618         driver_id = dev->driver_id;
1619         if (sess->sess_data[driver_id].refcnt == 0)
1620                 return 0;
1621         if (--sess->sess_data[driver_id].refcnt != 0)
1622                 return -EBUSY;
1623
1624         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1625
1626         dev->dev_ops->sym_session_clear(dev, sess);
1627
1628         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1629         return 0;
1630 }
1631
1632 int
1633 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1634                 struct rte_cryptodev_asym_session *sess)
1635 {
1636         struct rte_cryptodev *dev;
1637
1638         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1639                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1640                 return -EINVAL;
1641         }
1642
1643         dev = rte_cryptodev_pmd_get_dev(dev_id);
1644
1645         if (dev == NULL || sess == NULL)
1646                 return -EINVAL;
1647
1648         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1649
1650         dev->dev_ops->asym_session_clear(dev, sess);
1651
1652         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1653         return 0;
1654 }
1655
1656 int
1657 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1658 {
1659         uint8_t i;
1660         struct rte_mempool *sess_mp;
1661
1662         if (sess == NULL)
1663                 return -EINVAL;
1664
1665         /* Check that all device private data has been freed */
1666         for (i = 0; i < sess->nb_drivers; i++) {
1667                 if (sess->sess_data[i].refcnt != 0)
1668                         return -EBUSY;
1669         }
1670
1671         /* Return session to mempool */
1672         sess_mp = rte_mempool_from_obj(sess);
1673         rte_mempool_put(sess_mp, sess);
1674
1675         rte_cryptodev_trace_sym_session_free(sess);
1676         return 0;
1677 }
1678
1679 int
1680 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1681 {
1682         uint8_t i;
1683         void *sess_priv;
1684         struct rte_mempool *sess_mp;
1685
1686         if (sess == NULL)
1687                 return -EINVAL;
1688
1689         /* Check that all device private data has been freed */
1690         for (i = 0; i < nb_drivers; i++) {
1691                 sess_priv = get_asym_session_private_data(sess, i);
1692                 if (sess_priv != NULL)
1693                         return -EBUSY;
1694         }
1695
1696         /* Return session to mempool */
1697         sess_mp = rte_mempool_from_obj(sess);
1698         rte_mempool_put(sess_mp, sess);
1699
1700         rte_cryptodev_trace_asym_session_free(sess);
1701         return 0;
1702 }
1703
1704 unsigned int
1705 rte_cryptodev_sym_get_header_session_size(void)
1706 {
1707         /*
1708          * Header contains pointers to the private data of all registered
1709          * drivers and all necessary information to ensure safely clear
1710          * or free al session.
1711          */
1712         struct rte_cryptodev_sym_session s = {0};
1713
1714         s.nb_drivers = nb_drivers;
1715
1716         return (unsigned int)(sizeof(s) +
1717                         rte_cryptodev_sym_session_data_size(&s));
1718 }
1719
1720 unsigned int
1721 rte_cryptodev_sym_get_existing_header_session_size(
1722                 struct rte_cryptodev_sym_session *sess)
1723 {
1724         if (!sess)
1725                 return 0;
1726         else
1727                 return (unsigned int)(sizeof(*sess) +
1728                                 rte_cryptodev_sym_session_data_size(sess));
1729 }
1730
1731 unsigned int
1732 rte_cryptodev_asym_get_header_session_size(void)
1733 {
1734         /*
1735          * Header contains pointers to the private data
1736          * of all registered drivers, and a flag which
1737          * indicates presence of private data
1738          */
1739         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1740 }
1741
1742 unsigned int
1743 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1744 {
1745         struct rte_cryptodev *dev;
1746         unsigned int priv_sess_size;
1747
1748         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1749                 return 0;
1750
1751         dev = rte_cryptodev_pmd_get_dev(dev_id);
1752
1753         if (*dev->dev_ops->sym_session_get_size == NULL)
1754                 return 0;
1755
1756         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1757
1758         return priv_sess_size;
1759 }
1760
1761 unsigned int
1762 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1763 {
1764         struct rte_cryptodev *dev;
1765         unsigned int header_size = sizeof(void *) * nb_drivers;
1766         unsigned int priv_sess_size;
1767
1768         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1769                 return 0;
1770
1771         dev = rte_cryptodev_pmd_get_dev(dev_id);
1772
1773         if (*dev->dev_ops->asym_session_get_size == NULL)
1774                 return 0;
1775
1776         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1777         if (priv_sess_size < header_size)
1778                 return header_size;
1779
1780         return priv_sess_size;
1781
1782 }
1783
1784 int
1785 rte_cryptodev_sym_session_set_user_data(
1786                                         struct rte_cryptodev_sym_session *sess,
1787                                         void *data,
1788                                         uint16_t size)
1789 {
1790         if (sess == NULL)
1791                 return -EINVAL;
1792
1793         if (sess->user_data_sz < size)
1794                 return -ENOMEM;
1795
1796         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1797         return 0;
1798 }
1799
1800 void *
1801 rte_cryptodev_sym_session_get_user_data(
1802                                         struct rte_cryptodev_sym_session *sess)
1803 {
1804         if (sess == NULL || sess->user_data_sz == 0)
1805                 return NULL;
1806
1807         return (void *)(sess->sess_data + sess->nb_drivers);
1808 }
1809
1810 static inline void
1811 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
1812 {
1813         uint32_t i;
1814         for (i = 0; i < vec->num; i++)
1815                 vec->status[i] = errnum;
1816 }
1817
1818 uint32_t
1819 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1820         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1821         struct rte_crypto_sym_vec *vec)
1822 {
1823         struct rte_cryptodev *dev;
1824
1825         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1826                 sym_crypto_fill_status(vec, EINVAL);
1827                 return 0;
1828         }
1829
1830         dev = rte_cryptodev_pmd_get_dev(dev_id);
1831
1832         if (*dev->dev_ops->sym_cpu_process == NULL ||
1833                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
1834                 sym_crypto_fill_status(vec, ENOTSUP);
1835                 return 0;
1836         }
1837
1838         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1839 }
1840
1841 /** Initialise rte_crypto_op mempool element */
1842 static void
1843 rte_crypto_op_init(struct rte_mempool *mempool,
1844                 void *opaque_arg,
1845                 void *_op_data,
1846                 __rte_unused unsigned i)
1847 {
1848         struct rte_crypto_op *op = _op_data;
1849         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1850
1851         memset(_op_data, 0, mempool->elt_size);
1852
1853         __rte_crypto_op_reset(op, type);
1854
1855         op->phys_addr = rte_mem_virt2iova(_op_data);
1856         op->mempool = mempool;
1857 }
1858
1859
1860 struct rte_mempool *
1861 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1862                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1863                 int socket_id)
1864 {
1865         struct rte_crypto_op_pool_private *priv;
1866
1867         unsigned elt_size = sizeof(struct rte_crypto_op) +
1868                         priv_size;
1869
1870         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1871                 elt_size += sizeof(struct rte_crypto_sym_op);
1872         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1873                 elt_size += sizeof(struct rte_crypto_asym_op);
1874         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1875                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1876                                     sizeof(struct rte_crypto_asym_op));
1877         } else {
1878                 CDEV_LOG_ERR("Invalid op_type\n");
1879                 return NULL;
1880         }
1881
1882         /* lookup mempool in case already allocated */
1883         struct rte_mempool *mp = rte_mempool_lookup(name);
1884
1885         if (mp != NULL) {
1886                 priv = (struct rte_crypto_op_pool_private *)
1887                                 rte_mempool_get_priv(mp);
1888
1889                 if (mp->elt_size != elt_size ||
1890                                 mp->cache_size < cache_size ||
1891                                 mp->size < nb_elts ||
1892                                 priv->priv_size <  priv_size) {
1893                         mp = NULL;
1894                         CDEV_LOG_ERR("Mempool %s already exists but with "
1895                                         "incompatible parameters", name);
1896                         return NULL;
1897                 }
1898                 return mp;
1899         }
1900
1901         mp = rte_mempool_create(
1902                         name,
1903                         nb_elts,
1904                         elt_size,
1905                         cache_size,
1906                         sizeof(struct rte_crypto_op_pool_private),
1907                         NULL,
1908                         NULL,
1909                         rte_crypto_op_init,
1910                         &type,
1911                         socket_id,
1912                         0);
1913
1914         if (mp == NULL) {
1915                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1916                 return NULL;
1917         }
1918
1919         priv = (struct rte_crypto_op_pool_private *)
1920                         rte_mempool_get_priv(mp);
1921
1922         priv->priv_size = priv_size;
1923         priv->type = type;
1924
1925         return mp;
1926 }
1927
1928 int
1929 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1930 {
1931         struct rte_cryptodev *dev = NULL;
1932         uint32_t i = 0;
1933
1934         if (name == NULL)
1935                 return -EINVAL;
1936
1937         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1938                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1939                                 "%s_%u", dev_name_prefix, i);
1940
1941                 if (ret < 0)
1942                         return ret;
1943
1944                 dev = rte_cryptodev_pmd_get_named_dev(name);
1945                 if (!dev)
1946                         return 0;
1947         }
1948
1949         return -1;
1950 }
1951
/* List head type and singleton list of all registered crypto drivers;
 * appended to by rte_cryptodev_allocate_driver() and walked by the
 * driver id/name lookup helpers below.
 */
TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1956
1957 int
1958 rte_cryptodev_driver_id_get(const char *name)
1959 {
1960         struct cryptodev_driver *driver;
1961         const char *driver_name;
1962
1963         if (name == NULL) {
1964                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
1965                 return -1;
1966         }
1967
1968         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1969                 driver_name = driver->driver->name;
1970                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
1971                         return driver->id;
1972         }
1973         return -1;
1974 }
1975
1976 const char *
1977 rte_cryptodev_name_get(uint8_t dev_id)
1978 {
1979         struct rte_cryptodev *dev;
1980
1981         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1982                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1983                 return NULL;
1984         }
1985
1986         dev = rte_cryptodev_pmd_get_dev(dev_id);
1987         if (dev == NULL)
1988                 return NULL;
1989
1990         return dev->data->name;
1991 }
1992
1993 const char *
1994 rte_cryptodev_driver_name_get(uint8_t driver_id)
1995 {
1996         struct cryptodev_driver *driver;
1997
1998         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1999                 if (driver->id == driver_id)
2000                         return driver->driver->name;
2001         return NULL;
2002 }
2003
2004 uint8_t
2005 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2006                 const struct rte_driver *drv)
2007 {
2008         crypto_drv->driver = drv;
2009         crypto_drv->id = nb_drivers;
2010
2011         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2012
2013         return nb_drivers++;
2014 }