cryptodev: add function to check queue pair status
[dpdk.git] / lib / librte_cryptodev / rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39 #include <rte_compat.h>
40 #include <rte_function_versioning.h>
41
42 #include "rte_crypto.h"
43 #include "rte_cryptodev.h"
44 #include "rte_cryptodev_pmd.h"
45 #include "rte_cryptodev_trace.h"
46
/* Count of registered crypto drivers (used by the driver-id allocator) */
static uint8_t nb_drivers;

/* Flat table of all crypto devices, indexed by device id */
static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

/* Exported alias of the device table (part of the library ABI) */
struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

/* Library-global state: device table, per-device shared data, device count */
static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* Empty, terminator-only capability list returned when a device reports no
 * capabilities */
static const struct rte_cryptodev_capabilities
		cryptodev_undefined_capabilities[] = {
		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

/* Per-device capability copies and "already checked" flags — presumably
 * used by the versioned info_get/capability_get paths; confirm in the
 * rest of this file (not visible in this chunk). */
static struct rte_cryptodev_capabilities
		*capability_copy[RTE_CRYPTO_MAX_DEVS];
static uint8_t is_capability_checked[RTE_CRYPTO_MAX_DEVS];
70
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_cryptodev_event_type event;    /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing
						 * (non-zero while the callback
						 * runs — NOTE(review): likely
						 * lets unregister avoid freeing
						 * a running entry; confirm in
						 * the callback-process code) */
};
84
/**
 * The crypto cipher algorithm strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_cipher_algorithm; entry 0 is intentionally
 * unset (parsers start at index 1).
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_cipher_operation.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};
123
/**
 * The crypto auth algorithm strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_auth_algorithm; entry 0 is intentionally
 * unset (parsers start at index 1).
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_aead_algorithm.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_aead_operation.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};
177
/**
 * Asymmetric crypto transform operation strings identifiers.
 * Indexed by enum rte_crypto_asym_xform_type.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 * Indexed by enum rte_crypto_asym_op_type.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};
214
215 int
216 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
217                 const char *algo_string)
218 {
219         unsigned int i;
220
221         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
222                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
223                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
224                         return 0;
225                 }
226         }
227
228         /* Invalid string */
229         return -1;
230 }
231
232 int
233 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
234                 const char *algo_string)
235 {
236         unsigned int i;
237
238         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
239                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
240                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
241                         return 0;
242                 }
243         }
244
245         /* Invalid string */
246         return -1;
247 }
248
249 int
250 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
251                 const char *algo_string)
252 {
253         unsigned int i;
254
255         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
256                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
257                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
258                         return 0;
259                 }
260         }
261
262         /* Invalid string */
263         return -1;
264 }
265
266 int
267 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
268                 const char *xform_string)
269 {
270         unsigned int i;
271
272         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
273                 if (strcmp(xform_string,
274                         rte_crypto_asym_xform_strings[i]) == 0) {
275                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
276                         return 0;
277                 }
278         }
279
280         /* Invalid string */
281         return -1;
282 }
283
/**
 * The crypto auth operation strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_auth_operation.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
293
/*
 * 20.0 ABI version of rte_cryptodev_sym_capability_get(), kept alive via
 * symbol versioning.  Scans the capabilities reported by the v20 info_get
 * and returns the first symmetric capability whose xform type and algorithm
 * match @idx, or NULL if the device does not advertise it.
 */
const struct rte_cryptodev_symmetric_capability __vsym *
rte_cryptodev_sym_capability_get_v20(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get_v20(dev_id, &dev_info);

	/* capability array is terminated by an OP_TYPE_UNDEFINED entry */
	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		/* idx->algo is a union: pick the member selected by type */
		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
VERSION_SYMBOL(rte_cryptodev_sym_capability_get, _v20, 20.0);
328
/*
 * Current (21) version of rte_cryptodev_sym_capability_get(); identical to
 * the v20 body except that it uses the current rte_cryptodev_info_get().
 * Bound as the default symbol below.
 */
const struct rte_cryptodev_symmetric_capability __vsym *
rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	/* capability array is terminated by an OP_TYPE_UNDEFINED entry */
	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		/* idx->algo is a union: pick the member selected by type */
		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
MAP_STATIC_SYMBOL(const struct rte_cryptodev_symmetric_capability *
		rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx),
		rte_cryptodev_sym_capability_get_v21);
BIND_DEFAULT_SYMBOL(rte_cryptodev_sym_capability_get, _v21, 21);
367
368 static int
369 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
370 {
371         unsigned int next_size;
372
373         /* Check lower/upper bounds */
374         if (size < range->min)
375                 return -1;
376
377         if (size > range->max)
378                 return -1;
379
380         /* If range is actually only one value, size is correct */
381         if (range->increment == 0)
382                 return 0;
383
384         /* Check if value is one of the supported sizes */
385         for (next_size = range->min; next_size <= range->max;
386                         next_size += range->increment)
387                 if (size == next_size)
388                         return 0;
389
390         return -1;
391 }
392
393 const struct rte_cryptodev_asymmetric_xform_capability *
394 rte_cryptodev_asym_capability_get(uint8_t dev_id,
395                 const struct rte_cryptodev_asym_capability_idx *idx)
396 {
397         const struct rte_cryptodev_capabilities *capability;
398         struct rte_cryptodev_info dev_info;
399         unsigned int i = 0;
400
401         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
402         rte_cryptodev_info_get(dev_id, &dev_info);
403
404         while ((capability = &dev_info.capabilities[i++])->op !=
405                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
406                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
407                         continue;
408
409                 if (capability->asym.xform_capa.xform_type == idx->type)
410                         return &capability->asym.xform_capa;
411         }
412         return NULL;
413 };
414
415 int
416 rte_cryptodev_sym_capability_check_cipher(
417                 const struct rte_cryptodev_symmetric_capability *capability,
418                 uint16_t key_size, uint16_t iv_size)
419 {
420         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
421                 return -1;
422
423         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
424                 return -1;
425
426         return 0;
427 }
428
429 int
430 rte_cryptodev_sym_capability_check_auth(
431                 const struct rte_cryptodev_symmetric_capability *capability,
432                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
433 {
434         if (param_range_check(key_size, &capability->auth.key_size) != 0)
435                 return -1;
436
437         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
438                 return -1;
439
440         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
441                 return -1;
442
443         return 0;
444 }
445
446 int
447 rte_cryptodev_sym_capability_check_aead(
448                 const struct rte_cryptodev_symmetric_capability *capability,
449                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
450                 uint16_t iv_size)
451 {
452         if (param_range_check(key_size, &capability->aead.key_size) != 0)
453                 return -1;
454
455         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
456                 return -1;
457
458         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
459                 return -1;
460
461         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
462                 return -1;
463
464         return 0;
465 }
466 int
467 rte_cryptodev_asym_xform_capability_check_optype(
468         const struct rte_cryptodev_asymmetric_xform_capability *capability,
469         enum rte_crypto_asym_op_type op_type)
470 {
471         if (capability->op_types & (1 << op_type))
472                 return 1;
473
474         return 0;
475 }
476
477 int
478 rte_cryptodev_asym_xform_capability_check_modlen(
479         const struct rte_cryptodev_asymmetric_xform_capability *capability,
480         uint16_t modlen)
481 {
482         /* no need to check for limits, if min or max = 0 */
483         if (capability->modlen.min != 0) {
484                 if (modlen < capability->modlen.min)
485                         return -1;
486         }
487
488         if (capability->modlen.max != 0) {
489                 if (modlen > capability->modlen.max)
490                         return -1;
491         }
492
493         /* in any case, check if given modlen is module increment */
494         if (capability->modlen.increment != 0) {
495                 if (modlen % (capability->modlen.increment))
496                         return -1;
497         }
498
499         return 0;
500 }
501
502
503 const char *
504 rte_cryptodev_get_feature_name(uint64_t flag)
505 {
506         switch (flag) {
507         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
508                 return "SYMMETRIC_CRYPTO";
509         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
510                 return "ASYMMETRIC_CRYPTO";
511         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
512                 return "SYM_OPERATION_CHAINING";
513         case RTE_CRYPTODEV_FF_CPU_SSE:
514                 return "CPU_SSE";
515         case RTE_CRYPTODEV_FF_CPU_AVX:
516                 return "CPU_AVX";
517         case RTE_CRYPTODEV_FF_CPU_AVX2:
518                 return "CPU_AVX2";
519         case RTE_CRYPTODEV_FF_CPU_AVX512:
520                 return "CPU_AVX512";
521         case RTE_CRYPTODEV_FF_CPU_AESNI:
522                 return "CPU_AESNI";
523         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
524                 return "HW_ACCELERATED";
525         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
526                 return "IN_PLACE_SGL";
527         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
528                 return "OOP_SGL_IN_SGL_OUT";
529         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
530                 return "OOP_SGL_IN_LB_OUT";
531         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
532                 return "OOP_LB_IN_SGL_OUT";
533         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
534                 return "OOP_LB_IN_LB_OUT";
535         case RTE_CRYPTODEV_FF_CPU_NEON:
536                 return "CPU_NEON";
537         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
538                 return "CPU_ARM_CE";
539         case RTE_CRYPTODEV_FF_SECURITY:
540                 return "SECURITY_PROTOCOL";
541         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
542                 return "RSA_PRIV_OP_KEY_EXP";
543         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
544                 return "RSA_PRIV_OP_KEY_QT";
545         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
546                 return "DIGEST_ENCRYPTED";
547         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
548                 return "SYM_CPU_CRYPTO";
549         case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
550                 return "ASYM_SESSIONLESS";
551         case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
552                 return "SYM_SESSIONLESS";
553         case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
554                 return "NON_BYTE_ALIGNED_DATA";
555         default:
556                 return NULL;
557         }
558 }
559
560 struct rte_cryptodev *
561 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
562 {
563         return &cryptodev_globals.devs[dev_id];
564 }
565
566 struct rte_cryptodev *
567 rte_cryptodev_pmd_get_named_dev(const char *name)
568 {
569         struct rte_cryptodev *dev;
570         unsigned int i;
571
572         if (name == NULL)
573                 return NULL;
574
575         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
576                 dev = &cryptodev_globals.devs[i];
577
578                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
579                                 (strcmp(dev->data->name, name) == 0))
580                         return dev;
581         }
582
583         return NULL;
584 }
585
586 static inline uint8_t
587 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
588 {
589         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
590                         rte_crypto_devices[dev_id].data == NULL)
591                 return 0;
592
593         return 1;
594 }
595
596 unsigned int
597 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
598 {
599         struct rte_cryptodev *dev = NULL;
600
601         if (!rte_cryptodev_is_valid_device_data(dev_id))
602                 return 0;
603
604         dev = rte_cryptodev_pmd_get_dev(dev_id);
605         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
606                 return 0;
607         else
608                 return 1;
609 }
610
611
612 int
613 rte_cryptodev_get_dev_id(const char *name)
614 {
615         unsigned i;
616
617         if (name == NULL)
618                 return -1;
619
620         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
621                 if (!rte_cryptodev_is_valid_device_data(i))
622                         continue;
623                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
624                                 == 0) &&
625                                 (cryptodev_globals.devs[i].attached ==
626                                                 RTE_CRYPTODEV_ATTACHED))
627                         return i;
628         }
629
630         return -1;
631 }
632
/* Return the number of crypto devices currently attached. */
uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}
638
639 uint8_t
640 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
641 {
642         uint8_t i, dev_count = 0;
643
644         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
645                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
646                         cryptodev_globals.devs[i].attached ==
647                                         RTE_CRYPTODEV_ATTACHED)
648                         dev_count++;
649
650         return dev_count;
651 }
652
653 uint8_t
654 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
655         uint8_t nb_devices)
656 {
657         uint8_t i, count = 0;
658         struct rte_cryptodev *devs = cryptodev_globals.devs;
659
660         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
661                 if (!rte_cryptodev_is_valid_device_data(i))
662                         continue;
663
664                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
665                         int cmp;
666
667                         cmp = strncmp(devs[i].device->driver->name,
668                                         driver_name,
669                                         strlen(driver_name) + 1);
670
671                         if (cmp == 0)
672                                 devices[count++] = devs[i].data->dev_id;
673                 }
674         }
675
676         return count;
677 }
678
679 void *
680 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
681 {
682         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
683                         (rte_crypto_devices[dev_id].feature_flags &
684                         RTE_CRYPTODEV_FF_SECURITY))
685                 return rte_crypto_devices[dev_id].security_ctx;
686
687         return NULL;
688 }
689
690 int
691 rte_cryptodev_socket_id(uint8_t dev_id)
692 {
693         struct rte_cryptodev *dev;
694
695         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
696                 return -1;
697
698         dev = rte_cryptodev_pmd_get_dev(dev_id);
699
700         return dev->data->socket_id;
701 }
702
703 static inline int
704 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
705                 int socket_id)
706 {
707         char mz_name[RTE_MEMZONE_NAMESIZE];
708         const struct rte_memzone *mz;
709         int n;
710
711         /* generate memzone name */
712         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
713         if (n >= (int)sizeof(mz_name))
714                 return -EINVAL;
715
716         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
717                 mz = rte_memzone_reserve(mz_name,
718                                 sizeof(struct rte_cryptodev_data),
719                                 socket_id, 0);
720         } else
721                 mz = rte_memzone_lookup(mz_name);
722
723         if (mz == NULL)
724                 return -ENOMEM;
725
726         *data = mz->addr;
727         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
728                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
729
730         return 0;
731 }
732
733 static inline int
734 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
735 {
736         char mz_name[RTE_MEMZONE_NAMESIZE];
737         const struct rte_memzone *mz;
738         int n;
739
740         /* generate memzone name */
741         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
742         if (n >= (int)sizeof(mz_name))
743                 return -EINVAL;
744
745         mz = rte_memzone_lookup(mz_name);
746         if (mz == NULL)
747                 return -ENOMEM;
748
749         RTE_ASSERT(*data == mz->addr);
750         *data = NULL;
751
752         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
753                 return rte_memzone_free(mz);
754
755         return 0;
756 }
757
758 static uint8_t
759 rte_cryptodev_find_free_device_index(void)
760 {
761         uint8_t dev_id;
762
763         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
764                 if (rte_crypto_devices[dev_id].attached ==
765                                 RTE_CRYPTODEV_DETACHED)
766                         return dev_id;
767         }
768         return RTE_CRYPTO_MAX_DEVS;
769 }
770
/*
 * Allocate a new crypto device slot named @name on @socket_id.
 * Reserves (or, in a secondary process, attaches to) the shared device
 * data memzone, marks the slot attached and bumps the global device count.
 * Returns the device, or NULL on duplicate name / full table / alloc
 * failure.
 */
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	/* device names must be unique */
	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	/* data != NULL means the slot was already initialised (e.g. by the
	 * primary process); in that case just return it */
	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		/* only the primary process populates the shared data */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
822
/*
 * Release a crypto device slot: close the device (when its ops were set),
 * free the shared data memzone, detach the slot and decrement the global
 * device count.  Returns 0 on success or a negative errno on failure;
 * on failure the device is left attached.
 */
int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}
849
850 uint16_t
851 rte_cryptodev_queue_pair_count(uint8_t dev_id)
852 {
853         struct rte_cryptodev *dev;
854
855         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
856                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
857                 return 0;
858         }
859
860         dev = &rte_crypto_devices[dev_id];
861         return dev->data->nb_queue_pairs;
862 }
863
/*
 * (Re)size the queue-pair pointer array of @dev to @nb_qpairs entries.
 * First-time configuration zmalloc's the array; reconfiguration releases
 * any queue pairs beyond the new count, then reallocs the array and zeroes
 * the newly added tail.  Returns 0 on success or a negative errno.
 */
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	/* driver must implement info_get so we can check its qp limit */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
	    return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		/* release queue pairs that will no longer exist before the
		 * array shrinks underneath them */
		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		/* zero the pointers for any newly added queue pairs */
		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
941
942 int
943 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
944 {
945         struct rte_cryptodev *dev;
946         int diag;
947
948         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
949                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
950                 return -EINVAL;
951         }
952
953         dev = &rte_crypto_devices[dev_id];
954
955         if (dev->data->dev_started) {
956                 CDEV_LOG_ERR(
957                     "device %d must be stopped to allow configuration", dev_id);
958                 return -EBUSY;
959         }
960
961         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
962
963         /* Setup new number of queue pairs and reconfigure device. */
964         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
965                         config->socket_id);
966         if (diag != 0) {
967                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
968                                 dev_id, diag);
969                 return diag;
970         }
971
972         rte_cryptodev_trace_configure(dev_id, config);
973         return (*dev->dev_ops->dev_configure)(dev, config);
974 }
975
976
977 int
978 rte_cryptodev_start(uint8_t dev_id)
979 {
980         struct rte_cryptodev *dev;
981         int diag;
982
983         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
984
985         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
986                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
987                 return -EINVAL;
988         }
989
990         dev = &rte_crypto_devices[dev_id];
991
992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
993
994         if (dev->data->dev_started != 0) {
995                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
996                         dev_id);
997                 return 0;
998         }
999
1000         diag = (*dev->dev_ops->dev_start)(dev);
1001         rte_cryptodev_trace_start(dev_id, diag);
1002         if (diag == 0)
1003                 dev->data->dev_started = 1;
1004         else
1005                 return diag;
1006
1007         return 0;
1008 }
1009
1010 void
1011 rte_cryptodev_stop(uint8_t dev_id)
1012 {
1013         struct rte_cryptodev *dev;
1014
1015         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1016                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1017                 return;
1018         }
1019
1020         dev = &rte_crypto_devices[dev_id];
1021
1022         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1023
1024         if (dev->data->dev_started == 0) {
1025                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1026                         dev_id);
1027                 return;
1028         }
1029
1030         (*dev->dev_ops->dev_stop)(dev);
1031         rte_cryptodev_trace_stop(dev_id);
1032         dev->data->dev_started = 0;
1033 }
1034
1035 int
1036 rte_cryptodev_close(uint8_t dev_id)
1037 {
1038         struct rte_cryptodev *dev;
1039         int retval;
1040
1041         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1042                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1043                 return -1;
1044         }
1045
1046         dev = &rte_crypto_devices[dev_id];
1047
1048         /* Device must be stopped before it can be closed */
1049         if (dev->data->dev_started == 1) {
1050                 CDEV_LOG_ERR("Device %u must be stopped before closing",
1051                                 dev_id);
1052                 return -EBUSY;
1053         }
1054
1055         /* We can't close the device if there are outstanding sessions in use */
1056         if (dev->data->session_pool != NULL) {
1057                 if (!rte_mempool_full(dev->data->session_pool)) {
1058                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1059                                         "has sessions still in use, free "
1060                                         "all sessions before calling close",
1061                                         (unsigned)dev_id);
1062                         return -EBUSY;
1063                 }
1064         }
1065
1066         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1067         retval = (*dev->dev_ops->dev_close)(dev);
1068         rte_cryptodev_trace_close(dev_id, retval);
1069
1070         if (capability_copy[dev_id]) {
1071                 free(capability_copy[dev_id]);
1072                 capability_copy[dev_id] = NULL;
1073         }
1074         is_capability_checked[dev_id] = 0;
1075
1076         if (retval < 0)
1077                 return retval;
1078
1079         return 0;
1080 }
1081
1082 int
1083 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1084 {
1085         struct rte_cryptodev *dev;
1086
1087         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1088                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1089                 return -EINVAL;
1090         }
1091
1092         dev = &rte_crypto_devices[dev_id];
1093         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1094                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1095                 return -EINVAL;
1096         }
1097         void **qps = dev->data->queue_pairs;
1098
1099         if (qps[queue_pair_id]) {
1100                 CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1101                         queue_pair_id, dev_id);
1102                 return 1;
1103         }
1104
1105         CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1106                 queue_pair_id, dev_id);
1107
1108         return 0;
1109 }
1110
/*
 * Set up one queue pair on a stopped device.
 *
 * Validates dev_id and queue_pair_id; when session mempools are supplied,
 * requires mp_session and mp_session_private together and checks their
 * element/private-data sizes against this device's session header and
 * private session sizes before delegating to the PMD's queue_pair_setup op.
 *
 * Returns 0 on success, -EINVAL on bad parameters or undersized mempools,
 * -EBUSY while the device is started, -ENOTSUP when the PMD lacks the op,
 * otherwise the PMD's return value.
 */
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)

{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	/* Session mempools must be supplied as a pair or not at all. */
	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		/* Dummy session used only to compute required header size. */
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		/* Elements must fit the session header, the pool must cover
		 * this device's driver id, and the private-data elements must
		 * fit the device's private session size.
		 */
		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
1177
1178
1179 int
1180 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1181 {
1182         struct rte_cryptodev *dev;
1183
1184         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1185                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1186                 return -ENODEV;
1187         }
1188
1189         if (stats == NULL) {
1190                 CDEV_LOG_ERR("Invalid stats ptr");
1191                 return -EINVAL;
1192         }
1193
1194         dev = &rte_crypto_devices[dev_id];
1195         memset(stats, 0, sizeof(*stats));
1196
1197         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1198         (*dev->dev_ops->stats_get)(dev, stats);
1199         return 0;
1200 }
1201
1202 void
1203 rte_cryptodev_stats_reset(uint8_t dev_id)
1204 {
1205         struct rte_cryptodev *dev;
1206
1207         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1208                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1209                 return;
1210         }
1211
1212         dev = &rte_crypto_devices[dev_id];
1213
1214         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1215         (*dev->dev_ops->stats_reset)(dev);
1216 }
1217
1218 static void
1219 get_v20_capabilities(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1220 {
1221         const struct rte_cryptodev_capabilities *capability;
1222         uint8_t found_invalid_capa = 0;
1223         uint8_t counter = 0;
1224
1225         for (capability = dev_info->capabilities;
1226                         capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1227                         ++capability, ++counter) {
1228                 if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
1229                                 capability->sym.xform_type ==
1230                                         RTE_CRYPTO_SYM_XFORM_AEAD
1231                                 && capability->sym.aead.algo >=
1232                                 RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
1233                         found_invalid_capa = 1;
1234                         counter--;
1235                 }
1236         }
1237         is_capability_checked[dev_id] = 1;
1238         if (!found_invalid_capa)
1239                 return;
1240         capability_copy[dev_id] = malloc(counter *
1241                 sizeof(struct rte_cryptodev_capabilities));
1242         if (capability_copy[dev_id] == NULL) {
1243                  /*
1244                   * error case - no memory to store the trimmed
1245                   * list, so have to return an empty list
1246                   */
1247                 dev_info->capabilities =
1248                         cryptodev_undefined_capabilities;
1249                 is_capability_checked[dev_id] = 0;
1250         } else {
1251                 counter = 0;
1252                 for (capability = dev_info->capabilities;
1253                                 capability->op !=
1254                                 RTE_CRYPTO_OP_TYPE_UNDEFINED;
1255                                 capability++) {
1256                         if (!(capability->op ==
1257                                 RTE_CRYPTO_OP_TYPE_SYMMETRIC
1258                                 && capability->sym.xform_type ==
1259                                 RTE_CRYPTO_SYM_XFORM_AEAD
1260                                 && capability->sym.aead.algo >=
1261                                 RTE_CRYPTO_AEAD_CHACHA20_POLY1305)) {
1262                                 capability_copy[dev_id][counter++] =
1263                                                 *capability;
1264                         }
1265                 }
1266                 dev_info->capabilities =
1267                                 capability_copy[dev_id];
1268         }
1269 }
1270
/*
 * v20 ABI variant of rte_cryptodev_info_get().
 *
 * Fills @dev_info from the PMD, then substitutes a capability list trimmed
 * of entries unknown to ABI 20.0 (built and cached by
 * get_v20_capabilities()).
 */
void __vsym
rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	/* Reuse a previously trimmed list; build it at most once per dev. */
	if (capability_copy[dev_id] == NULL) {
		if (!is_capability_checked[dev_id])
			get_v20_capabilities(dev_id, dev_info);
	} else
		dev_info->capabilities = capability_copy[dev_id];

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
VERSION_SYMBOL(rte_cryptodev_info_get, _v20, 20.0);
1298
/*
 * Current (v21, default) variant of rte_cryptodev_info_get(): fills
 * @dev_info straight from the PMD with no capability filtering.
 */
void __vsym
rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
MAP_STATIC_SYMBOL(void rte_cryptodev_info_get(uint8_t dev_id,
	struct rte_cryptodev_info *dev_info), rte_cryptodev_info_get_v21);
BIND_DEFAULT_SYMBOL(rte_cryptodev_info_get, _v21, 21);
1322
/*
 * Register an application callback for @event on device @dev_id.
 *
 * Under rte_cryptodev_cb_lock the device's callback list is scanned for an
 * existing entry with the same (cb_fn, cb_arg, event) triple, so duplicate
 * registrations are silently collapsed; otherwise a new entry is appended.
 *
 * Returns 0 on success, -EINVAL for a NULL cb_fn or unknown device,
 * -ENOMEM if a new entry could not be allocated.
 */
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	/* Look for an identical registration before creating a new one. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
1365
/*
 * Unregister callbacks matching (cb_fn, event, cb_arg) on device @dev_id.
 *
 * An entry whose stored cb_arg is (void *)-1 matches any @cb_arg.  Entries
 * currently executing (active != 0, set by pmd_callback_process) are left
 * on the list and -EAGAIN is returned so the caller can retry later.
 *
 * Returns 0 on success, -EINVAL for a NULL cb_fn or unknown device,
 * -EAGAIN if at least one matching callback was busy.
 */
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		/* Grab the successor first: cb may be freed below. */
		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
1411
/*
 * Invoke every callback registered for @event on @dev.
 *
 * The list is walked under rte_cryptodev_cb_lock, but each callback is run
 * on a local copy with the lock released, so the callback may itself call
 * back into the cryptodev API.  The entry's 'active' flag marks it as
 * running so callback_unregister will not free it mid-call.
 */
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		/* Drop the lock while user code runs. */
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
1433
1434
/*
 * Initialise the driver-private data of @sess for device @dev_id.
 *
 * A session holds one private-data slot per driver; this configures the
 * slot indexed by the device's driver_id — calling the PMD's
 * sym_session_configure only while the slot's refcnt is 0 — then bumps the
 * slot's reference count.
 *
 * Returns 0 on success; -EINVAL for bad ids/pointers, a private-data
 * mempool whose elements are smaller than the device's private session
 * size, or a session created for fewer drivers than this driver_id;
 * -ENOTSUP when the PMD lacks the op; otherwise the PMD's error code.
 */
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	/* Pool elements must fit this device's private session data. */
	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	/* Configure only on first use of this driver's slot. */
	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}
1481
/*
 * Initialise the driver-private data of an asymmetric session for device
 * @dev_id.  The PMD's asym_session_configure is called only while the
 * driver's sess_private_data slot is still NULL (no reference counting,
 * unlike the symmetric variant).
 *
 * Returns 0 on success, -EINVAL for bad ids/pointers, -ENOTSUP when the
 * PMD lacks the op, otherwise the PMD's error code.
 */
int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	/* Configure only if this driver's slot is not yet populated. */
	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
	return 0;
}
1522
/*
 * Create a mempool for symmetric session headers.
 *
 * The element size is the larger of @elt_size and the full session header
 * size plus @user_data_size.  The pool's private area records nb_drivers
 * and user_data_sz so sessions created from it can size their per-driver
 * data (validated later by rte_cryptodev_sym_is_valid_session_pool()).
 *
 * Returns the new mempool, or NULL on failure (error logged; rte_errno is
 * set by rte_mempool_create()).
 */
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	/* Stamp the pool so session creation can size per-driver data. */
	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
1564
1565 static unsigned int
1566 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1567 {
1568         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1569                         sess->user_data_sz;
1570 }
1571
1572 static uint8_t
1573 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1574 {
1575         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1576
1577         if (!mp)
1578                 return 0;
1579
1580         pool_priv = rte_mempool_get_priv(mp);
1581
1582         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1583                         pool_priv->nb_drivers != nb_drivers ||
1584                         mp->elt_size <
1585                                 rte_cryptodev_sym_get_header_session_size()
1586                                 + pool_priv->user_data_sz)
1587                 return 0;
1588
1589         return 1;
1590 }
1591
1592 struct rte_cryptodev_sym_session *
1593 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1594 {
1595         struct rte_cryptodev_sym_session *sess;
1596         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1597
1598         if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1599                 CDEV_LOG_ERR("Invalid mempool\n");
1600                 return NULL;
1601         }
1602
1603         pool_priv = rte_mempool_get_priv(mp);
1604
1605         /* Allocate a session structure from the session pool */
1606         if (rte_mempool_get(mp, (void **)&sess)) {
1607                 CDEV_LOG_ERR("couldn't get object from session mempool");
1608                 return NULL;
1609         }
1610
1611         sess->nb_drivers = pool_priv->nb_drivers;
1612         sess->user_data_sz = pool_priv->user_data_sz;
1613         sess->opaque_data = 0;
1614
1615         /* Clear device session pointer.
1616          * Include the flag indicating presence of user data
1617          */
1618         memset(sess->sess_data, 0,
1619                         rte_cryptodev_sym_session_data_size(sess));
1620
1621         rte_cryptodev_trace_sym_session_create(mp, sess);
1622         return sess;
1623 }
1624
1625 struct rte_cryptodev_asym_session *
1626 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1627 {
1628         struct rte_cryptodev_asym_session *sess;
1629         unsigned int session_size =
1630                         rte_cryptodev_asym_get_header_session_size();
1631
1632         if (!mp) {
1633                 CDEV_LOG_ERR("invalid mempool\n");
1634                 return NULL;
1635         }
1636
1637         /* Verify if provided mempool can hold elements big enough. */
1638         if (mp->elt_size < session_size) {
1639                 CDEV_LOG_ERR(
1640                         "mempool elements too small to hold session objects");
1641                 return NULL;
1642         }
1643
1644         /* Allocate a session structure from the session pool */
1645         if (rte_mempool_get(mp, (void **)&sess)) {
1646                 CDEV_LOG_ERR("couldn't get object from session mempool");
1647                 return NULL;
1648         }
1649
1650         /* Clear device session pointer.
1651          * Include the flag indicating presence of private data
1652          */
1653         memset(sess, 0, session_size);
1654
1655         rte_cryptodev_trace_asym_session_create(mp, sess);
1656         return sess;
1657 }
1658
1659 int
1660 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1661                 struct rte_cryptodev_sym_session *sess)
1662 {
1663         struct rte_cryptodev *dev;
1664         uint8_t driver_id;
1665
1666         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1667                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1668                 return -EINVAL;
1669         }
1670
1671         dev = rte_cryptodev_pmd_get_dev(dev_id);
1672
1673         if (dev == NULL || sess == NULL)
1674                 return -EINVAL;
1675
1676         driver_id = dev->driver_id;
1677         if (sess->sess_data[driver_id].refcnt == 0)
1678                 return 0;
1679         if (--sess->sess_data[driver_id].refcnt != 0)
1680                 return -EBUSY;
1681
1682         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1683
1684         dev->dev_ops->sym_session_clear(dev, sess);
1685
1686         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1687         return 0;
1688 }
1689
1690 int
1691 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1692                 struct rte_cryptodev_asym_session *sess)
1693 {
1694         struct rte_cryptodev *dev;
1695
1696         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1697                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1698                 return -EINVAL;
1699         }
1700
1701         dev = rte_cryptodev_pmd_get_dev(dev_id);
1702
1703         if (dev == NULL || sess == NULL)
1704                 return -EINVAL;
1705
1706         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1707
1708         dev->dev_ops->asym_session_clear(dev, sess);
1709
1710         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1711         return 0;
1712 }
1713
1714 int
1715 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1716 {
1717         uint8_t i;
1718         struct rte_mempool *sess_mp;
1719
1720         if (sess == NULL)
1721                 return -EINVAL;
1722
1723         /* Check that all device private data has been freed */
1724         for (i = 0; i < sess->nb_drivers; i++) {
1725                 if (sess->sess_data[i].refcnt != 0)
1726                         return -EBUSY;
1727         }
1728
1729         /* Return session to mempool */
1730         sess_mp = rte_mempool_from_obj(sess);
1731         rte_mempool_put(sess_mp, sess);
1732
1733         rte_cryptodev_trace_sym_session_free(sess);
1734         return 0;
1735 }
1736
1737 int
1738 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1739 {
1740         uint8_t i;
1741         void *sess_priv;
1742         struct rte_mempool *sess_mp;
1743
1744         if (sess == NULL)
1745                 return -EINVAL;
1746
1747         /* Check that all device private data has been freed */
1748         for (i = 0; i < nb_drivers; i++) {
1749                 sess_priv = get_asym_session_private_data(sess, i);
1750                 if (sess_priv != NULL)
1751                         return -EBUSY;
1752         }
1753
1754         /* Return session to mempool */
1755         sess_mp = rte_mempool_from_obj(sess);
1756         rte_mempool_put(sess_mp, sess);
1757
1758         rte_cryptodev_trace_asym_session_free(sess);
1759         return 0;
1760 }
1761
1762 unsigned int
1763 rte_cryptodev_sym_get_header_session_size(void)
1764 {
1765         /*
1766          * Header contains pointers to the private data of all registered
1767          * drivers and all necessary information to ensure safely clear
1768          * or free al session.
1769          */
1770         struct rte_cryptodev_sym_session s = {0};
1771
1772         s.nb_drivers = nb_drivers;
1773
1774         return (unsigned int)(sizeof(s) +
1775                         rte_cryptodev_sym_session_data_size(&s));
1776 }
1777
1778 unsigned int
1779 rte_cryptodev_sym_get_existing_header_session_size(
1780                 struct rte_cryptodev_sym_session *sess)
1781 {
1782         if (!sess)
1783                 return 0;
1784         else
1785                 return (unsigned int)(sizeof(*sess) +
1786                                 rte_cryptodev_sym_session_data_size(sess));
1787 }
1788
1789 unsigned int
1790 rte_cryptodev_asym_get_header_session_size(void)
1791 {
1792         /*
1793          * Header contains pointers to the private data
1794          * of all registered drivers, and a flag which
1795          * indicates presence of private data
1796          */
1797         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1798 }
1799
1800 unsigned int
1801 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1802 {
1803         struct rte_cryptodev *dev;
1804         unsigned int priv_sess_size;
1805
1806         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1807                 return 0;
1808
1809         dev = rte_cryptodev_pmd_get_dev(dev_id);
1810
1811         if (*dev->dev_ops->sym_session_get_size == NULL)
1812                 return 0;
1813
1814         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1815
1816         return priv_sess_size;
1817 }
1818
1819 unsigned int
1820 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1821 {
1822         struct rte_cryptodev *dev;
1823         unsigned int header_size = sizeof(void *) * nb_drivers;
1824         unsigned int priv_sess_size;
1825
1826         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1827                 return 0;
1828
1829         dev = rte_cryptodev_pmd_get_dev(dev_id);
1830
1831         if (*dev->dev_ops->asym_session_get_size == NULL)
1832                 return 0;
1833
1834         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1835         if (priv_sess_size < header_size)
1836                 return header_size;
1837
1838         return priv_sess_size;
1839
1840 }
1841
1842 int
1843 rte_cryptodev_sym_session_set_user_data(
1844                                         struct rte_cryptodev_sym_session *sess,
1845                                         void *data,
1846                                         uint16_t size)
1847 {
1848         if (sess == NULL)
1849                 return -EINVAL;
1850
1851         if (sess->user_data_sz < size)
1852                 return -ENOMEM;
1853
1854         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1855         return 0;
1856 }
1857
1858 void *
1859 rte_cryptodev_sym_session_get_user_data(
1860                                         struct rte_cryptodev_sym_session *sess)
1861 {
1862         if (sess == NULL || sess->user_data_sz == 0)
1863                 return NULL;
1864
1865         return (void *)(sess->sess_data + sess->nb_drivers);
1866 }
1867
1868 static inline void
1869 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
1870 {
1871         uint32_t i;
1872         for (i = 0; i < vec->num; i++)
1873                 vec->status[i] = errnum;
1874 }
1875
1876 uint32_t
1877 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1878         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1879         struct rte_crypto_sym_vec *vec)
1880 {
1881         struct rte_cryptodev *dev;
1882
1883         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1884                 sym_crypto_fill_status(vec, EINVAL);
1885                 return 0;
1886         }
1887
1888         dev = rte_cryptodev_pmd_get_dev(dev_id);
1889
1890         if (*dev->dev_ops->sym_cpu_process == NULL ||
1891                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
1892                 sym_crypto_fill_status(vec, ENOTSUP);
1893                 return 0;
1894         }
1895
1896         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1897 }
1898
1899 /** Initialise rte_crypto_op mempool element */
1900 static void
1901 rte_crypto_op_init(struct rte_mempool *mempool,
1902                 void *opaque_arg,
1903                 void *_op_data,
1904                 __rte_unused unsigned i)
1905 {
1906         struct rte_crypto_op *op = _op_data;
1907         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1908
1909         memset(_op_data, 0, mempool->elt_size);
1910
1911         __rte_crypto_op_reset(op, type);
1912
1913         op->phys_addr = rte_mem_virt2iova(_op_data);
1914         op->mempool = mempool;
1915 }
1916
1917
1918 struct rte_mempool *
1919 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1920                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1921                 int socket_id)
1922 {
1923         struct rte_crypto_op_pool_private *priv;
1924
1925         unsigned elt_size = sizeof(struct rte_crypto_op) +
1926                         priv_size;
1927
1928         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1929                 elt_size += sizeof(struct rte_crypto_sym_op);
1930         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1931                 elt_size += sizeof(struct rte_crypto_asym_op);
1932         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1933                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1934                                     sizeof(struct rte_crypto_asym_op));
1935         } else {
1936                 CDEV_LOG_ERR("Invalid op_type\n");
1937                 return NULL;
1938         }
1939
1940         /* lookup mempool in case already allocated */
1941         struct rte_mempool *mp = rte_mempool_lookup(name);
1942
1943         if (mp != NULL) {
1944                 priv = (struct rte_crypto_op_pool_private *)
1945                                 rte_mempool_get_priv(mp);
1946
1947                 if (mp->elt_size != elt_size ||
1948                                 mp->cache_size < cache_size ||
1949                                 mp->size < nb_elts ||
1950                                 priv->priv_size <  priv_size) {
1951                         mp = NULL;
1952                         CDEV_LOG_ERR("Mempool %s already exists but with "
1953                                         "incompatible parameters", name);
1954                         return NULL;
1955                 }
1956                 return mp;
1957         }
1958
1959         mp = rte_mempool_create(
1960                         name,
1961                         nb_elts,
1962                         elt_size,
1963                         cache_size,
1964                         sizeof(struct rte_crypto_op_pool_private),
1965                         NULL,
1966                         NULL,
1967                         rte_crypto_op_init,
1968                         &type,
1969                         socket_id,
1970                         0);
1971
1972         if (mp == NULL) {
1973                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1974                 return NULL;
1975         }
1976
1977         priv = (struct rte_crypto_op_pool_private *)
1978                         rte_mempool_get_priv(mp);
1979
1980         priv->priv_size = priv_size;
1981         priv->type = type;
1982
1983         return mp;
1984 }
1985
1986 int
1987 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1988 {
1989         struct rte_cryptodev *dev = NULL;
1990         uint32_t i = 0;
1991
1992         if (name == NULL)
1993                 return -EINVAL;
1994
1995         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1996                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1997                                 "%s_%u", dev_name_prefix, i);
1998
1999                 if (ret < 0)
2000                         return ret;
2001
2002                 dev = rte_cryptodev_pmd_get_named_dev(name);
2003                 if (!dev)
2004                         return 0;
2005         }
2006
2007         return -1;
2008 }
2009
2010 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2011
2012 static struct cryptodev_driver_list cryptodev_driver_list =
2013         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2014
2015 int
2016 rte_cryptodev_driver_id_get(const char *name)
2017 {
2018         struct cryptodev_driver *driver;
2019         const char *driver_name;
2020
2021         if (name == NULL) {
2022                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
2023                 return -1;
2024         }
2025
2026         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2027                 driver_name = driver->driver->name;
2028                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2029                         return driver->id;
2030         }
2031         return -1;
2032 }
2033
2034 const char *
2035 rte_cryptodev_name_get(uint8_t dev_id)
2036 {
2037         struct rte_cryptodev *dev;
2038
2039         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2040                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2041                 return NULL;
2042         }
2043
2044         dev = rte_cryptodev_pmd_get_dev(dev_id);
2045         if (dev == NULL)
2046                 return NULL;
2047
2048         return dev->data->name;
2049 }
2050
2051 const char *
2052 rte_cryptodev_driver_name_get(uint8_t driver_id)
2053 {
2054         struct cryptodev_driver *driver;
2055
2056         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2057                 if (driver->id == driver_id)
2058                         return driver->driver->name;
2059         return NULL;
2060 }
2061
2062 uint8_t
2063 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2064                 const struct rte_driver *drv)
2065 {
2066         crypto_drv->driver = drv;
2067         crypto_drv->id = nb_drivers;
2068
2069         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2070
2071         return nb_drivers++;
2072 }