2ce606af85289129e90be681717555603e93d04a
[dpdk.git] / lib / librte_cryptodev / rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43 #include "rte_cryptodev_trace.h"
44
45 static uint8_t nb_drivers;
46
47 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
48
49 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
50
51 static struct rte_cryptodev_global cryptodev_globals = {
52                 .devs                   = rte_crypto_devices,
53                 .data                   = { NULL },
54                 .nb_devs                = 0
55 };
56
57 /* spinlock for crypto device callbacks */
58 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
59
60
61 /**
62  * The user application callback description.
63  *
64  * It contains callback address to be registered by user application,
65  * the pointer to the parameters for callback, and the event type.
66  */
67 struct rte_cryptodev_callback {
68         TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
69         rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
70         void *cb_arg;                           /**< Parameter for callback */
71         enum rte_cryptodev_event_type event;    /**< Interrupt event type */
72         uint32_t active;                        /**< Callback is executing */
73 };
74
75 /**
76  * The crypto cipher algorithm strings identifiers.
77  * It could be used in application command line.
78  */
79 const char *
80 rte_crypto_cipher_algorithm_strings[] = {
81         [RTE_CRYPTO_CIPHER_3DES_CBC]    = "3des-cbc",
82         [RTE_CRYPTO_CIPHER_3DES_ECB]    = "3des-ecb",
83         [RTE_CRYPTO_CIPHER_3DES_CTR]    = "3des-ctr",
84
85         [RTE_CRYPTO_CIPHER_AES_CBC]     = "aes-cbc",
86         [RTE_CRYPTO_CIPHER_AES_CTR]     = "aes-ctr",
87         [RTE_CRYPTO_CIPHER_AES_DOCSISBPI]       = "aes-docsisbpi",
88         [RTE_CRYPTO_CIPHER_AES_ECB]     = "aes-ecb",
89         [RTE_CRYPTO_CIPHER_AES_F8]      = "aes-f8",
90         [RTE_CRYPTO_CIPHER_AES_XTS]     = "aes-xts",
91
92         [RTE_CRYPTO_CIPHER_ARC4]        = "arc4",
93
94         [RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
95         [RTE_CRYPTO_CIPHER_DES_DOCSISBPI]       = "des-docsisbpi",
96
97         [RTE_CRYPTO_CIPHER_NULL]        = "null",
98
99         [RTE_CRYPTO_CIPHER_KASUMI_F8]   = "kasumi-f8",
100         [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
101         [RTE_CRYPTO_CIPHER_ZUC_EEA3]    = "zuc-eea3"
102 };
103
104 /**
105  * The crypto cipher operation strings identifiers.
106  * It could be used in application command line.
107  */
108 const char *
109 rte_crypto_cipher_operation_strings[] = {
110                 [RTE_CRYPTO_CIPHER_OP_ENCRYPT]  = "encrypt",
111                 [RTE_CRYPTO_CIPHER_OP_DECRYPT]  = "decrypt"
112 };
113
114 /**
115  * The crypto auth algorithm strings identifiers.
116  * It could be used in application command line.
117  */
118 const char *
119 rte_crypto_auth_algorithm_strings[] = {
120         [RTE_CRYPTO_AUTH_AES_CBC_MAC]   = "aes-cbc-mac",
121         [RTE_CRYPTO_AUTH_AES_CMAC]      = "aes-cmac",
122         [RTE_CRYPTO_AUTH_AES_GMAC]      = "aes-gmac",
123         [RTE_CRYPTO_AUTH_AES_XCBC_MAC]  = "aes-xcbc-mac",
124
125         [RTE_CRYPTO_AUTH_MD5]           = "md5",
126         [RTE_CRYPTO_AUTH_MD5_HMAC]      = "md5-hmac",
127
128         [RTE_CRYPTO_AUTH_NULL]          = "null",
129
130         [RTE_CRYPTO_AUTH_SHA1]          = "sha1",
131         [RTE_CRYPTO_AUTH_SHA1_HMAC]     = "sha1-hmac",
132
133         [RTE_CRYPTO_AUTH_SHA224]        = "sha2-224",
134         [RTE_CRYPTO_AUTH_SHA224_HMAC]   = "sha2-224-hmac",
135         [RTE_CRYPTO_AUTH_SHA256]        = "sha2-256",
136         [RTE_CRYPTO_AUTH_SHA256_HMAC]   = "sha2-256-hmac",
137         [RTE_CRYPTO_AUTH_SHA384]        = "sha2-384",
138         [RTE_CRYPTO_AUTH_SHA384_HMAC]   = "sha2-384-hmac",
139         [RTE_CRYPTO_AUTH_SHA512]        = "sha2-512",
140         [RTE_CRYPTO_AUTH_SHA512_HMAC]   = "sha2-512-hmac",
141
142         [RTE_CRYPTO_AUTH_KASUMI_F9]     = "kasumi-f9",
143         [RTE_CRYPTO_AUTH_SNOW3G_UIA2]   = "snow3g-uia2",
144         [RTE_CRYPTO_AUTH_ZUC_EIA3]      = "zuc-eia3"
145 };
146
147 /**
148  * The crypto AEAD algorithm strings identifiers.
149  * It could be used in application command line.
150  */
151 const char *
152 rte_crypto_aead_algorithm_strings[] = {
153         [RTE_CRYPTO_AEAD_AES_CCM]       = "aes-ccm",
154         [RTE_CRYPTO_AEAD_AES_GCM]       = "aes-gcm",
155         [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
156 };
157
158 /**
159  * The crypto AEAD operation strings identifiers.
160  * It could be used in application command line.
161  */
162 const char *
163 rte_crypto_aead_operation_strings[] = {
164         [RTE_CRYPTO_AEAD_OP_ENCRYPT]    = "encrypt",
165         [RTE_CRYPTO_AEAD_OP_DECRYPT]    = "decrypt"
166 };
167
168 /**
169  * Asymmetric crypto transform operation strings identifiers.
170  */
171 const char *rte_crypto_asym_xform_strings[] = {
172         [RTE_CRYPTO_ASYM_XFORM_NONE]    = "none",
173         [RTE_CRYPTO_ASYM_XFORM_RSA]     = "rsa",
174         [RTE_CRYPTO_ASYM_XFORM_MODEX]   = "modexp",
175         [RTE_CRYPTO_ASYM_XFORM_MODINV]  = "modinv",
176         [RTE_CRYPTO_ASYM_XFORM_DH]      = "dh",
177         [RTE_CRYPTO_ASYM_XFORM_DSA]     = "dsa",
178         [RTE_CRYPTO_ASYM_XFORM_ECDSA]   = "ecdsa",
179         [RTE_CRYPTO_ASYM_XFORM_ECPM]    = "ecpm",
180 };
181
182 /**
183  * Asymmetric crypto operation strings identifiers.
184  */
185 const char *rte_crypto_asym_op_strings[] = {
186         [RTE_CRYPTO_ASYM_OP_ENCRYPT]    = "encrypt",
187         [RTE_CRYPTO_ASYM_OP_DECRYPT]    = "decrypt",
188         [RTE_CRYPTO_ASYM_OP_SIGN]       = "sign",
189         [RTE_CRYPTO_ASYM_OP_VERIFY]     = "verify",
190         [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]       = "priv_key_generate",
191         [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
192         [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
193 };
194
195 /**
196  * The private data structure stored in the session mempool private data.
197  */
198 struct rte_cryptodev_sym_session_pool_private_data {
199         uint16_t nb_drivers;
200         /**< number of elements in sess_data array */
201         uint16_t user_data_sz;
202         /**< session user data will be placed after sess_data */
203 };
204
205 int
206 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
207                 const char *algo_string)
208 {
209         unsigned int i;
210
211         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
212                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
213                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
214                         return 0;
215                 }
216         }
217
218         /* Invalid string */
219         return -1;
220 }
221
222 int
223 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
224                 const char *algo_string)
225 {
226         unsigned int i;
227
228         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
229                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
230                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
231                         return 0;
232                 }
233         }
234
235         /* Invalid string */
236         return -1;
237 }
238
239 int
240 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
241                 const char *algo_string)
242 {
243         unsigned int i;
244
245         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
246                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
247                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
248                         return 0;
249                 }
250         }
251
252         /* Invalid string */
253         return -1;
254 }
255
256 int
257 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
258                 const char *xform_string)
259 {
260         unsigned int i;
261
262         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
263                 if (strcmp(xform_string,
264                         rte_crypto_asym_xform_strings[i]) == 0) {
265                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
266                         return 0;
267                 }
268         }
269
270         /* Invalid string */
271         return -1;
272 }
273
274 /**
275  * The crypto auth operation strings identifiers.
276  * It could be used in application command line.
277  */
278 const char *
279 rte_crypto_auth_operation_strings[] = {
280                 [RTE_CRYPTO_AUTH_OP_VERIFY]     = "verify",
281                 [RTE_CRYPTO_AUTH_OP_GENERATE]   = "generate"
282 };
283
284 const struct rte_cryptodev_symmetric_capability *
285 rte_cryptodev_sym_capability_get(uint8_t dev_id,
286                 const struct rte_cryptodev_sym_capability_idx *idx)
287 {
288         const struct rte_cryptodev_capabilities *capability;
289         struct rte_cryptodev_info dev_info;
290         int i = 0;
291
292         rte_cryptodev_info_get(dev_id, &dev_info);
293
294         while ((capability = &dev_info.capabilities[i++])->op !=
295                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
296                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
297                         continue;
298
299                 if (capability->sym.xform_type != idx->type)
300                         continue;
301
302                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
303                         capability->sym.auth.algo == idx->algo.auth)
304                         return &capability->sym;
305
306                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
307                         capability->sym.cipher.algo == idx->algo.cipher)
308                         return &capability->sym;
309
310                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
311                                 capability->sym.aead.algo == idx->algo.aead)
312                         return &capability->sym;
313         }
314
315         return NULL;
316
317 }
318
319 static int
320 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
321 {
322         unsigned int next_size;
323
324         /* Check lower/upper bounds */
325         if (size < range->min)
326                 return -1;
327
328         if (size > range->max)
329                 return -1;
330
331         /* If range is actually only one value, size is correct */
332         if (range->increment == 0)
333                 return 0;
334
335         /* Check if value is one of the supported sizes */
336         for (next_size = range->min; next_size <= range->max;
337                         next_size += range->increment)
338                 if (size == next_size)
339                         return 0;
340
341         return -1;
342 }
343
344 const struct rte_cryptodev_asymmetric_xform_capability *
345 rte_cryptodev_asym_capability_get(uint8_t dev_id,
346                 const struct rte_cryptodev_asym_capability_idx *idx)
347 {
348         const struct rte_cryptodev_capabilities *capability;
349         struct rte_cryptodev_info dev_info;
350         unsigned int i = 0;
351
352         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
353         rte_cryptodev_info_get(dev_id, &dev_info);
354
355         while ((capability = &dev_info.capabilities[i++])->op !=
356                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
357                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
358                         continue;
359
360                 if (capability->asym.xform_capa.xform_type == idx->type)
361                         return &capability->asym.xform_capa;
362         }
363         return NULL;
364 };
365
366 int
367 rte_cryptodev_sym_capability_check_cipher(
368                 const struct rte_cryptodev_symmetric_capability *capability,
369                 uint16_t key_size, uint16_t iv_size)
370 {
371         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
372                 return -1;
373
374         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
375                 return -1;
376
377         return 0;
378 }
379
380 int
381 rte_cryptodev_sym_capability_check_auth(
382                 const struct rte_cryptodev_symmetric_capability *capability,
383                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
384 {
385         if (param_range_check(key_size, &capability->auth.key_size) != 0)
386                 return -1;
387
388         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
389                 return -1;
390
391         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
392                 return -1;
393
394         return 0;
395 }
396
397 int
398 rte_cryptodev_sym_capability_check_aead(
399                 const struct rte_cryptodev_symmetric_capability *capability,
400                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
401                 uint16_t iv_size)
402 {
403         if (param_range_check(key_size, &capability->aead.key_size) != 0)
404                 return -1;
405
406         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
407                 return -1;
408
409         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
410                 return -1;
411
412         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
413                 return -1;
414
415         return 0;
416 }
417 int
418 rte_cryptodev_asym_xform_capability_check_optype(
419         const struct rte_cryptodev_asymmetric_xform_capability *capability,
420         enum rte_crypto_asym_op_type op_type)
421 {
422         if (capability->op_types & (1 << op_type))
423                 return 1;
424
425         return 0;
426 }
427
428 int
429 rte_cryptodev_asym_xform_capability_check_modlen(
430         const struct rte_cryptodev_asymmetric_xform_capability *capability,
431         uint16_t modlen)
432 {
433         /* no need to check for limits, if min or max = 0 */
434         if (capability->modlen.min != 0) {
435                 if (modlen < capability->modlen.min)
436                         return -1;
437         }
438
439         if (capability->modlen.max != 0) {
440                 if (modlen > capability->modlen.max)
441                         return -1;
442         }
443
444         /* in any case, check if given modlen is module increment */
445         if (capability->modlen.increment != 0) {
446                 if (modlen % (capability->modlen.increment))
447                         return -1;
448         }
449
450         return 0;
451 }
452
453
454 const char *
455 rte_cryptodev_get_feature_name(uint64_t flag)
456 {
457         switch (flag) {
458         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
459                 return "SYMMETRIC_CRYPTO";
460         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
461                 return "ASYMMETRIC_CRYPTO";
462         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
463                 return "SYM_OPERATION_CHAINING";
464         case RTE_CRYPTODEV_FF_CPU_SSE:
465                 return "CPU_SSE";
466         case RTE_CRYPTODEV_FF_CPU_AVX:
467                 return "CPU_AVX";
468         case RTE_CRYPTODEV_FF_CPU_AVX2:
469                 return "CPU_AVX2";
470         case RTE_CRYPTODEV_FF_CPU_AVX512:
471                 return "CPU_AVX512";
472         case RTE_CRYPTODEV_FF_CPU_AESNI:
473                 return "CPU_AESNI";
474         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
475                 return "HW_ACCELERATED";
476         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
477                 return "IN_PLACE_SGL";
478         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
479                 return "OOP_SGL_IN_SGL_OUT";
480         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
481                 return "OOP_SGL_IN_LB_OUT";
482         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
483                 return "OOP_LB_IN_SGL_OUT";
484         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
485                 return "OOP_LB_IN_LB_OUT";
486         case RTE_CRYPTODEV_FF_CPU_NEON:
487                 return "CPU_NEON";
488         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
489                 return "CPU_ARM_CE";
490         case RTE_CRYPTODEV_FF_SECURITY:
491                 return "SECURITY_PROTOCOL";
492         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
493                 return "RSA_PRIV_OP_KEY_EXP";
494         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
495                 return "RSA_PRIV_OP_KEY_QT";
496         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
497                 return "DIGEST_ENCRYPTED";
498         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
499                 return "SYM_CPU_CRYPTO";
500         case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
501                 return "ASYM_SESSIONLESS";
502         case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
503                 return "SYM_SESSIONLESS";
504         case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
505                 return "NON_BYTE_ALIGNED_DATA";
506         default:
507                 return NULL;
508         }
509 }
510
511 struct rte_cryptodev *
512 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
513 {
514         return &cryptodev_globals.devs[dev_id];
515 }
516
517 struct rte_cryptodev *
518 rte_cryptodev_pmd_get_named_dev(const char *name)
519 {
520         struct rte_cryptodev *dev;
521         unsigned int i;
522
523         if (name == NULL)
524                 return NULL;
525
526         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
527                 dev = &cryptodev_globals.devs[i];
528
529                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
530                                 (strcmp(dev->data->name, name) == 0))
531                         return dev;
532         }
533
534         return NULL;
535 }
536
537 static inline uint8_t
538 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
539 {
540         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
541                         rte_crypto_devices[dev_id].data == NULL)
542                 return 0;
543
544         return 1;
545 }
546
547 unsigned int
548 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
549 {
550         struct rte_cryptodev *dev = NULL;
551
552         if (!rte_cryptodev_is_valid_device_data(dev_id))
553                 return 0;
554
555         dev = rte_cryptodev_pmd_get_dev(dev_id);
556         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
557                 return 0;
558         else
559                 return 1;
560 }
561
562
563 int
564 rte_cryptodev_get_dev_id(const char *name)
565 {
566         unsigned i;
567
568         if (name == NULL)
569                 return -1;
570
571         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
572                 if (!rte_cryptodev_is_valid_device_data(i))
573                         continue;
574                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
575                                 == 0) &&
576                                 (cryptodev_globals.devs[i].attached ==
577                                                 RTE_CRYPTODEV_ATTACHED))
578                         return i;
579         }
580
581         return -1;
582 }
583
584 uint8_t
585 rte_cryptodev_count(void)
586 {
587         return cryptodev_globals.nb_devs;
588 }
589
590 uint8_t
591 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
592 {
593         uint8_t i, dev_count = 0;
594
595         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
596                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
597                         cryptodev_globals.devs[i].attached ==
598                                         RTE_CRYPTODEV_ATTACHED)
599                         dev_count++;
600
601         return dev_count;
602 }
603
604 uint8_t
605 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
606         uint8_t nb_devices)
607 {
608         uint8_t i, count = 0;
609         struct rte_cryptodev *devs = cryptodev_globals.devs;
610
611         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
612                 if (!rte_cryptodev_is_valid_device_data(i))
613                         continue;
614
615                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
616                         int cmp;
617
618                         cmp = strncmp(devs[i].device->driver->name,
619                                         driver_name,
620                                         strlen(driver_name) + 1);
621
622                         if (cmp == 0)
623                                 devices[count++] = devs[i].data->dev_id;
624                 }
625         }
626
627         return count;
628 }
629
630 void *
631 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
632 {
633         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
634                         (rte_crypto_devices[dev_id].feature_flags &
635                         RTE_CRYPTODEV_FF_SECURITY))
636                 return rte_crypto_devices[dev_id].security_ctx;
637
638         return NULL;
639 }
640
641 int
642 rte_cryptodev_socket_id(uint8_t dev_id)
643 {
644         struct rte_cryptodev *dev;
645
646         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
647                 return -1;
648
649         dev = rte_cryptodev_pmd_get_dev(dev_id);
650
651         return dev->data->socket_id;
652 }
653
654 static inline int
655 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
656                 int socket_id)
657 {
658         char mz_name[RTE_MEMZONE_NAMESIZE];
659         const struct rte_memzone *mz;
660         int n;
661
662         /* generate memzone name */
663         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
664         if (n >= (int)sizeof(mz_name))
665                 return -EINVAL;
666
667         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
668                 mz = rte_memzone_reserve(mz_name,
669                                 sizeof(struct rte_cryptodev_data),
670                                 socket_id, 0);
671         } else
672                 mz = rte_memzone_lookup(mz_name);
673
674         if (mz == NULL)
675                 return -ENOMEM;
676
677         *data = mz->addr;
678         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
679                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
680
681         return 0;
682 }
683
684 static inline int
685 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
686 {
687         char mz_name[RTE_MEMZONE_NAMESIZE];
688         const struct rte_memzone *mz;
689         int n;
690
691         /* generate memzone name */
692         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
693         if (n >= (int)sizeof(mz_name))
694                 return -EINVAL;
695
696         mz = rte_memzone_lookup(mz_name);
697         if (mz == NULL)
698                 return -ENOMEM;
699
700         RTE_ASSERT(*data == mz->addr);
701         *data = NULL;
702
703         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
704                 return rte_memzone_free(mz);
705
706         return 0;
707 }
708
709 static uint8_t
710 rte_cryptodev_find_free_device_index(void)
711 {
712         uint8_t dev_id;
713
714         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
715                 if (rte_crypto_devices[dev_id].attached ==
716                                 RTE_CRYPTODEV_DETACHED)
717                         return dev_id;
718         }
719         return RTE_CRYPTO_MAX_DEVS;
720 }
721
722 struct rte_cryptodev *
723 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
724 {
725         struct rte_cryptodev *cryptodev;
726         uint8_t dev_id;
727
728         if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
729                 CDEV_LOG_ERR("Crypto device with name %s already "
730                                 "allocated!", name);
731                 return NULL;
732         }
733
734         dev_id = rte_cryptodev_find_free_device_index();
735         if (dev_id == RTE_CRYPTO_MAX_DEVS) {
736                 CDEV_LOG_ERR("Reached maximum number of crypto devices");
737                 return NULL;
738         }
739
740         cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
741
742         if (cryptodev->data == NULL) {
743                 struct rte_cryptodev_data **cryptodev_data =
744                                 &cryptodev_globals.data[dev_id];
745
746                 int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
747                                 socket_id);
748
749                 if (retval < 0 || *cryptodev_data == NULL)
750                         return NULL;
751
752                 cryptodev->data = *cryptodev_data;
753
754                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
755                         strlcpy(cryptodev->data->name, name,
756                                 RTE_CRYPTODEV_NAME_MAX_LEN);
757
758                         cryptodev->data->dev_id = dev_id;
759                         cryptodev->data->socket_id = socket_id;
760                         cryptodev->data->dev_started = 0;
761                 }
762
763                 /* init user callbacks */
764                 TAILQ_INIT(&(cryptodev->link_intr_cbs));
765
766                 cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
767
768                 cryptodev_globals.nb_devs++;
769         }
770
771         return cryptodev;
772 }
773
774 int
775 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
776 {
777         int ret;
778         uint8_t dev_id;
779
780         if (cryptodev == NULL)
781                 return -EINVAL;
782
783         dev_id = cryptodev->data->dev_id;
784
785         /* Close device only if device operations have been set */
786         if (cryptodev->dev_ops) {
787                 ret = rte_cryptodev_close(dev_id);
788                 if (ret < 0)
789                         return ret;
790         }
791
792         ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
793         if (ret < 0)
794                 return ret;
795
796         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
797         cryptodev_globals.nb_devs--;
798         return 0;
799 }
800
801 uint16_t
802 rte_cryptodev_queue_pair_count(uint8_t dev_id)
803 {
804         struct rte_cryptodev *dev;
805
806         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
807                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
808                 return 0;
809         }
810
811         dev = &rte_crypto_devices[dev_id];
812         return dev->data->nb_queue_pairs;
813 }
814
815 static int
816 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
817                 int socket_id)
818 {
819         struct rte_cryptodev_info dev_info;
820         void **qp;
821         unsigned i;
822
823         if ((dev == NULL) || (nb_qpairs < 1)) {
824                 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
825                                                         dev, nb_qpairs);
826                 return -EINVAL;
827         }
828
829         CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
830                         nb_qpairs, dev->data->dev_id);
831
832         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
833
834         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
835         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
836
837         if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
838                 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
839                                 nb_qpairs, dev->data->dev_id);
840             return -EINVAL;
841         }
842
843         if (dev->data->queue_pairs == NULL) { /* first time configuration */
844                 dev->data->queue_pairs = rte_zmalloc_socket(
845                                 "cryptodev->queue_pairs",
846                                 sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
847                                 RTE_CACHE_LINE_SIZE, socket_id);
848
849                 if (dev->data->queue_pairs == NULL) {
850                         dev->data->nb_queue_pairs = 0;
851                         CDEV_LOG_ERR("failed to get memory for qp meta data, "
852                                                         "nb_queues %u",
853                                                         nb_qpairs);
854                         return -(ENOMEM);
855                 }
856         } else { /* re-configure */
857                 int ret;
858                 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
859
860                 qp = dev->data->queue_pairs;
861
862                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
863                                 -ENOTSUP);
864
865                 for (i = nb_qpairs; i < old_nb_queues; i++) {
866                         ret = (*dev->dev_ops->queue_pair_release)(dev, i);
867                         if (ret < 0)
868                                 return ret;
869                 }
870
871                 qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
872                                 RTE_CACHE_LINE_SIZE);
873                 if (qp == NULL) {
874                         CDEV_LOG_ERR("failed to realloc qp meta data,"
875                                                 " nb_queues %u", nb_qpairs);
876                         return -(ENOMEM);
877                 }
878
879                 if (nb_qpairs > old_nb_queues) {
880                         uint16_t new_qs = nb_qpairs - old_nb_queues;
881
882                         memset(qp + old_nb_queues, 0,
883                                 sizeof(qp[0]) * new_qs);
884                 }
885
886                 dev->data->queue_pairs = qp;
887
888         }
889         dev->data->nb_queue_pairs = nb_qpairs;
890         return 0;
891 }
892
893 int
894 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
895 {
896         struct rte_cryptodev *dev;
897         int diag;
898
899         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
900                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
901                 return -EINVAL;
902         }
903
904         dev = &rte_crypto_devices[dev_id];
905
906         if (dev->data->dev_started) {
907                 CDEV_LOG_ERR(
908                     "device %d must be stopped to allow configuration", dev_id);
909                 return -EBUSY;
910         }
911
912         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
913
914         /* Setup new number of queue pairs and reconfigure device. */
915         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
916                         config->socket_id);
917         if (diag != 0) {
918                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
919                                 dev_id, diag);
920                 return diag;
921         }
922
923         rte_cryptodev_trace_configure(dev_id, config);
924         return (*dev->dev_ops->dev_configure)(dev, config);
925 }
926
927
928 int
929 rte_cryptodev_start(uint8_t dev_id)
930 {
931         struct rte_cryptodev *dev;
932         int diag;
933
934         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
935
936         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
937                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
938                 return -EINVAL;
939         }
940
941         dev = &rte_crypto_devices[dev_id];
942
943         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
944
945         if (dev->data->dev_started != 0) {
946                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
947                         dev_id);
948                 return 0;
949         }
950
951         diag = (*dev->dev_ops->dev_start)(dev);
952         rte_cryptodev_trace_start(dev_id, diag);
953         if (diag == 0)
954                 dev->data->dev_started = 1;
955         else
956                 return diag;
957
958         return 0;
959 }
960
961 void
962 rte_cryptodev_stop(uint8_t dev_id)
963 {
964         struct rte_cryptodev *dev;
965
966         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
967                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
968                 return;
969         }
970
971         dev = &rte_crypto_devices[dev_id];
972
973         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
974
975         if (dev->data->dev_started == 0) {
976                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
977                         dev_id);
978                 return;
979         }
980
981         (*dev->dev_ops->dev_stop)(dev);
982         rte_cryptodev_trace_stop(dev_id);
983         dev->data->dev_started = 0;
984 }
985
986 int
987 rte_cryptodev_close(uint8_t dev_id)
988 {
989         struct rte_cryptodev *dev;
990         int retval;
991
992         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
993                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
994                 return -1;
995         }
996
997         dev = &rte_crypto_devices[dev_id];
998
999         /* Device must be stopped before it can be closed */
1000         if (dev->data->dev_started == 1) {
1001                 CDEV_LOG_ERR("Device %u must be stopped before closing",
1002                                 dev_id);
1003                 return -EBUSY;
1004         }
1005
1006         /* We can't close the device if there are outstanding sessions in use */
1007         if (dev->data->session_pool != NULL) {
1008                 if (!rte_mempool_full(dev->data->session_pool)) {
1009                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1010                                         "has sessions still in use, free "
1011                                         "all sessions before calling close",
1012                                         (unsigned)dev_id);
1013                         return -EBUSY;
1014                 }
1015         }
1016
1017         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1018         retval = (*dev->dev_ops->dev_close)(dev);
1019         rte_cryptodev_trace_close(dev_id, retval);
1020
1021         if (retval < 0)
1022                 return retval;
1023
1024         return 0;
1025 }
1026
1027 int
1028 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1029                 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1030
1031 {
1032         struct rte_cryptodev *dev;
1033
1034         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1035                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1036                 return -EINVAL;
1037         }
1038
1039         dev = &rte_crypto_devices[dev_id];
1040         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1041                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1042                 return -EINVAL;
1043         }
1044
1045         if (!qp_conf) {
1046                 CDEV_LOG_ERR("qp_conf cannot be NULL\n");
1047                 return -EINVAL;
1048         }
1049
1050         if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1051                         (!qp_conf->mp_session && qp_conf->mp_session_private)) {
1052                 CDEV_LOG_ERR("Invalid mempools\n");
1053                 return -EINVAL;
1054         }
1055
1056         if (qp_conf->mp_session) {
1057                 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1058                 uint32_t obj_size = qp_conf->mp_session->elt_size;
1059                 uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1060                 struct rte_cryptodev_sym_session s = {0};
1061
1062                 pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1063                 if (!pool_priv || qp_conf->mp_session->private_data_size <
1064                                 sizeof(*pool_priv)) {
1065                         CDEV_LOG_ERR("Invalid mempool\n");
1066                         return -EINVAL;
1067                 }
1068
1069                 s.nb_drivers = pool_priv->nb_drivers;
1070                 s.user_data_sz = pool_priv->user_data_sz;
1071
1072                 if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1073                         obj_size) || (s.nb_drivers <= dev->driver_id) ||
1074                         rte_cryptodev_sym_get_private_session_size(dev_id) >
1075                                 obj_priv_size) {
1076                         CDEV_LOG_ERR("Invalid mempool\n");
1077                         return -EINVAL;
1078                 }
1079         }
1080
1081         if (dev->data->dev_started) {
1082                 CDEV_LOG_ERR(
1083                     "device %d must be stopped to allow configuration", dev_id);
1084                 return -EBUSY;
1085         }
1086
1087         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1088
1089         rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1090         return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1091                         socket_id);
1092 }
1093
1094
1095 int
1096 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1097 {
1098         struct rte_cryptodev *dev;
1099
1100         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1101                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1102                 return -ENODEV;
1103         }
1104
1105         if (stats == NULL) {
1106                 CDEV_LOG_ERR("Invalid stats ptr");
1107                 return -EINVAL;
1108         }
1109
1110         dev = &rte_crypto_devices[dev_id];
1111         memset(stats, 0, sizeof(*stats));
1112
1113         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1114         (*dev->dev_ops->stats_get)(dev, stats);
1115         return 0;
1116 }
1117
1118 void
1119 rte_cryptodev_stats_reset(uint8_t dev_id)
1120 {
1121         struct rte_cryptodev *dev;
1122
1123         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1124                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1125                 return;
1126         }
1127
1128         dev = &rte_crypto_devices[dev_id];
1129
1130         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1131         (*dev->dev_ops->stats_reset)(dev);
1132 }
1133
1134
1135 void
1136 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1137 {
1138         struct rte_cryptodev *dev;
1139
1140         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1141                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1142                 return;
1143         }
1144
1145         dev = &rte_crypto_devices[dev_id];
1146
1147         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1148
1149         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1150         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1151
1152         dev_info->driver_name = dev->device->driver->name;
1153         dev_info->device = dev->device;
1154 }
1155
1156
1157 int
1158 rte_cryptodev_callback_register(uint8_t dev_id,
1159                         enum rte_cryptodev_event_type event,
1160                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1161 {
1162         struct rte_cryptodev *dev;
1163         struct rte_cryptodev_callback *user_cb;
1164
1165         if (!cb_fn)
1166                 return -EINVAL;
1167
1168         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1169                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1170                 return -EINVAL;
1171         }
1172
1173         dev = &rte_crypto_devices[dev_id];
1174         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1175
1176         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1177                 if (user_cb->cb_fn == cb_fn &&
1178                         user_cb->cb_arg == cb_arg &&
1179                         user_cb->event == event) {
1180                         break;
1181                 }
1182         }
1183
1184         /* create a new callback. */
1185         if (user_cb == NULL) {
1186                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1187                                 sizeof(struct rte_cryptodev_callback), 0);
1188                 if (user_cb != NULL) {
1189                         user_cb->cb_fn = cb_fn;
1190                         user_cb->cb_arg = cb_arg;
1191                         user_cb->event = event;
1192                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1193                 }
1194         }
1195
1196         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1197         return (user_cb == NULL) ? -ENOMEM : 0;
1198 }
1199
1200 int
1201 rte_cryptodev_callback_unregister(uint8_t dev_id,
1202                         enum rte_cryptodev_event_type event,
1203                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1204 {
1205         int ret;
1206         struct rte_cryptodev *dev;
1207         struct rte_cryptodev_callback *cb, *next;
1208
1209         if (!cb_fn)
1210                 return -EINVAL;
1211
1212         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1213                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1214                 return -EINVAL;
1215         }
1216
1217         dev = &rte_crypto_devices[dev_id];
1218         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1219
1220         ret = 0;
1221         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1222
1223                 next = TAILQ_NEXT(cb, next);
1224
1225                 if (cb->cb_fn != cb_fn || cb->event != event ||
1226                                 (cb->cb_arg != (void *)-1 &&
1227                                 cb->cb_arg != cb_arg))
1228                         continue;
1229
1230                 /*
1231                  * if this callback is not executing right now,
1232                  * then remove it.
1233                  */
1234                 if (cb->active == 0) {
1235                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1236                         rte_free(cb);
1237                 } else {
1238                         ret = -EAGAIN;
1239                 }
1240         }
1241
1242         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1243         return ret;
1244 }
1245
1246 void
1247 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1248         enum rte_cryptodev_event_type event)
1249 {
1250         struct rte_cryptodev_callback *cb_lst;
1251         struct rte_cryptodev_callback dev_cb;
1252
1253         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1254         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1255                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1256                         continue;
1257                 dev_cb = *cb_lst;
1258                 cb_lst->active = 1;
1259                 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1260                 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1261                                                 dev_cb.cb_arg);
1262                 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1263                 cb_lst->active = 0;
1264         }
1265         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1266 }
1267
1268
1269 int
1270 rte_cryptodev_sym_session_init(uint8_t dev_id,
1271                 struct rte_cryptodev_sym_session *sess,
1272                 struct rte_crypto_sym_xform *xforms,
1273                 struct rte_mempool *mp)
1274 {
1275         struct rte_cryptodev *dev;
1276         uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1277                         dev_id);
1278         uint8_t index;
1279         int ret;
1280
1281         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1282                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1283                 return -EINVAL;
1284         }
1285
1286         dev = rte_cryptodev_pmd_get_dev(dev_id);
1287
1288         if (sess == NULL || xforms == NULL || dev == NULL)
1289                 return -EINVAL;
1290
1291         if (mp->elt_size < sess_priv_sz)
1292                 return -EINVAL;
1293
1294         index = dev->driver_id;
1295         if (index >= sess->nb_drivers)
1296                 return -EINVAL;
1297
1298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1299
1300         if (sess->sess_data[index].refcnt == 0) {
1301                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1302                                                         sess, mp);
1303                 if (ret < 0) {
1304                         CDEV_LOG_ERR(
1305                                 "dev_id %d failed to configure session details",
1306                                 dev_id);
1307                         return ret;
1308                 }
1309         }
1310
1311         rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1312         sess->sess_data[index].refcnt++;
1313         return 0;
1314 }
1315
1316 int
1317 rte_cryptodev_asym_session_init(uint8_t dev_id,
1318                 struct rte_cryptodev_asym_session *sess,
1319                 struct rte_crypto_asym_xform *xforms,
1320                 struct rte_mempool *mp)
1321 {
1322         struct rte_cryptodev *dev;
1323         uint8_t index;
1324         int ret;
1325
1326         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1327                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1328                 return -EINVAL;
1329         }
1330
1331         dev = rte_cryptodev_pmd_get_dev(dev_id);
1332
1333         if (sess == NULL || xforms == NULL || dev == NULL)
1334                 return -EINVAL;
1335
1336         index = dev->driver_id;
1337
1338         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1339                                 -ENOTSUP);
1340
1341         if (sess->sess_private_data[index] == NULL) {
1342                 ret = dev->dev_ops->asym_session_configure(dev,
1343                                                         xforms,
1344                                                         sess, mp);
1345                 if (ret < 0) {
1346                         CDEV_LOG_ERR(
1347                                 "dev_id %d failed to configure session details",
1348                                 dev_id);
1349                         return ret;
1350                 }
1351         }
1352
1353         rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1354         return 0;
1355 }
1356
1357 struct rte_mempool *
1358 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1359         uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1360         int socket_id)
1361 {
1362         struct rte_mempool *mp;
1363         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1364         uint32_t obj_sz;
1365
1366         obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1367         if (obj_sz > elt_size)
1368                 CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1369                                 obj_sz);
1370         else
1371                 obj_sz = elt_size;
1372
1373         mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1374                         (uint32_t)(sizeof(*pool_priv)),
1375                         NULL, NULL, NULL, NULL,
1376                         socket_id, 0);
1377         if (mp == NULL) {
1378                 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1379                         __func__, name, rte_errno);
1380                 return NULL;
1381         }
1382
1383         pool_priv = rte_mempool_get_priv(mp);
1384         if (!pool_priv) {
1385                 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1386                         __func__, name);
1387                 rte_mempool_free(mp);
1388                 return NULL;
1389         }
1390
1391         pool_priv->nb_drivers = nb_drivers;
1392         pool_priv->user_data_sz = user_data_size;
1393
1394         rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1395                 elt_size, cache_size, user_data_size, mp);
1396         return mp;
1397 }
1398
1399 static unsigned int
1400 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1401 {
1402         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1403                         sess->user_data_sz;
1404 }
1405
1406 struct rte_cryptodev_sym_session *
1407 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1408 {
1409         struct rte_cryptodev_sym_session *sess;
1410         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1411
1412         if (!mp) {
1413                 CDEV_LOG_ERR("Invalid mempool\n");
1414                 return NULL;
1415         }
1416
1417         pool_priv = rte_mempool_get_priv(mp);
1418
1419         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
1420                 CDEV_LOG_ERR("Invalid mempool\n");
1421                 return NULL;
1422         }
1423
1424         /* Allocate a session structure from the session pool */
1425         if (rte_mempool_get(mp, (void **)&sess)) {
1426                 CDEV_LOG_ERR("couldn't get object from session mempool");
1427                 return NULL;
1428         }
1429
1430         sess->nb_drivers = pool_priv->nb_drivers;
1431         sess->user_data_sz = pool_priv->user_data_sz;
1432         sess->opaque_data = 0;
1433
1434         /* Clear device session pointer.
1435          * Include the flag indicating presence of user data
1436          */
1437         memset(sess->sess_data, 0,
1438                         rte_cryptodev_sym_session_data_size(sess));
1439
1440         rte_cryptodev_trace_sym_session_create(mp, sess);
1441         return sess;
1442 }
1443
1444 struct rte_cryptodev_asym_session *
1445 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1446 {
1447         struct rte_cryptodev_asym_session *sess;
1448
1449         /* Allocate a session structure from the session pool */
1450         if (rte_mempool_get(mp, (void **)&sess)) {
1451                 CDEV_LOG_ERR("couldn't get object from session mempool");
1452                 return NULL;
1453         }
1454
1455         /* Clear device session pointer.
1456          * Include the flag indicating presence of private data
1457          */
1458         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1459
1460         rte_cryptodev_trace_asym_session_create(mp, sess);
1461         return sess;
1462 }
1463
1464 int
1465 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1466                 struct rte_cryptodev_sym_session *sess)
1467 {
1468         struct rte_cryptodev *dev;
1469         uint8_t driver_id;
1470
1471         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1472                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1473                 return -EINVAL;
1474         }
1475
1476         dev = rte_cryptodev_pmd_get_dev(dev_id);
1477
1478         if (dev == NULL || sess == NULL)
1479                 return -EINVAL;
1480
1481         driver_id = dev->driver_id;
1482         if (sess->sess_data[driver_id].refcnt == 0)
1483                 return 0;
1484         if (--sess->sess_data[driver_id].refcnt != 0)
1485                 return -EBUSY;
1486
1487         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1488
1489         dev->dev_ops->sym_session_clear(dev, sess);
1490
1491         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1492         return 0;
1493 }
1494
1495 int
1496 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1497                 struct rte_cryptodev_asym_session *sess)
1498 {
1499         struct rte_cryptodev *dev;
1500
1501         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1502                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1503                 return -EINVAL;
1504         }
1505
1506         dev = rte_cryptodev_pmd_get_dev(dev_id);
1507
1508         if (dev == NULL || sess == NULL)
1509                 return -EINVAL;
1510
1511         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1512
1513         dev->dev_ops->asym_session_clear(dev, sess);
1514
1515         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1516         return 0;
1517 }
1518
1519 int
1520 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1521 {
1522         uint8_t i;
1523         struct rte_mempool *sess_mp;
1524
1525         if (sess == NULL)
1526                 return -EINVAL;
1527
1528         /* Check that all device private data has been freed */
1529         for (i = 0; i < sess->nb_drivers; i++) {
1530                 if (sess->sess_data[i].refcnt != 0)
1531                         return -EBUSY;
1532         }
1533
1534         /* Return session to mempool */
1535         sess_mp = rte_mempool_from_obj(sess);
1536         rte_mempool_put(sess_mp, sess);
1537
1538         rte_cryptodev_trace_sym_session_free(sess);
1539         return 0;
1540 }
1541
1542 int
1543 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1544 {
1545         uint8_t i;
1546         void *sess_priv;
1547         struct rte_mempool *sess_mp;
1548
1549         if (sess == NULL)
1550                 return -EINVAL;
1551
1552         /* Check that all device private data has been freed */
1553         for (i = 0; i < nb_drivers; i++) {
1554                 sess_priv = get_asym_session_private_data(sess, i);
1555                 if (sess_priv != NULL)
1556                         return -EBUSY;
1557         }
1558
1559         /* Return session to mempool */
1560         sess_mp = rte_mempool_from_obj(sess);
1561         rte_mempool_put(sess_mp, sess);
1562
1563         rte_cryptodev_trace_asym_session_free(sess);
1564         return 0;
1565 }
1566
1567 unsigned int
1568 rte_cryptodev_sym_get_header_session_size(void)
1569 {
1570         /*
1571          * Header contains pointers to the private data of all registered
1572          * drivers and all necessary information to ensure safely clear
1573          * or free al session.
1574          */
1575         struct rte_cryptodev_sym_session s = {0};
1576
1577         s.nb_drivers = nb_drivers;
1578
1579         return (unsigned int)(sizeof(s) +
1580                         rte_cryptodev_sym_session_data_size(&s));
1581 }
1582
1583 unsigned int
1584 rte_cryptodev_sym_get_existing_header_session_size(
1585                 struct rte_cryptodev_sym_session *sess)
1586 {
1587         if (!sess)
1588                 return 0;
1589         else
1590                 return (unsigned int)(sizeof(*sess) +
1591                                 rte_cryptodev_sym_session_data_size(sess));
1592 }
1593
1594 unsigned int
1595 rte_cryptodev_asym_get_header_session_size(void)
1596 {
1597         /*
1598          * Header contains pointers to the private data
1599          * of all registered drivers, and a flag which
1600          * indicates presence of private data
1601          */
1602         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1603 }
1604
1605 unsigned int
1606 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1607 {
1608         struct rte_cryptodev *dev;
1609         unsigned int priv_sess_size;
1610
1611         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1612                 return 0;
1613
1614         dev = rte_cryptodev_pmd_get_dev(dev_id);
1615
1616         if (*dev->dev_ops->sym_session_get_size == NULL)
1617                 return 0;
1618
1619         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1620
1621         return priv_sess_size;
1622 }
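
/*
 * Sizing sketch (illustrative, assuming the common two-mempool layout: one
 * pool for session headers and one for per-device private data, sized with
 * the two helpers above). Pool names, counts and the xform are
 * application-side assumptions; error handling is omitted.
 *
 *        unsigned int hdr_sz = rte_cryptodev_sym_get_header_session_size();
 *        unsigned int priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *
 *        struct rte_mempool *sess_pool = rte_cryptodev_sym_session_pool_create(
 *                        "sess_hdr_pool", 1024, hdr_sz, 128, 0, rte_socket_id());
 *        struct rte_mempool *priv_pool = rte_mempool_create("sess_priv_pool",
 *                        1024, priv_sz, 128, 0, NULL, NULL, NULL, NULL,
 *                        rte_socket_id(), 0);
 *
 *        struct rte_cryptodev_sym_session *sess =
 *                        rte_cryptodev_sym_session_create(sess_pool);
 *        rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_pool);
 */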
1623
1624 unsigned int
1625 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1626 {
1627         struct rte_cryptodev *dev;
1628         unsigned int header_size = sizeof(void *) * nb_drivers;
1629         unsigned int priv_sess_size;
1630
1631         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1632                 return 0;
1633
1634         dev = rte_cryptodev_pmd_get_dev(dev_id);
1635
1636         if (*dev->dev_ops->asym_session_get_size == NULL)
1637                 return 0;
1638
1639         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1640         if (priv_sess_size < header_size)
1641                 return header_size;
1642
1643         return priv_sess_size;
1644
1645 }
1646
1647 int
1648 rte_cryptodev_sym_session_set_user_data(
1649                                         struct rte_cryptodev_sym_session *sess,
1650                                         void *data,
1651                                         uint16_t size)
1652 {
1653         if (sess == NULL)
1654                 return -EINVAL;
1655
1656         if (sess->user_data_sz < size)
1657                 return -ENOMEM;
1658
1659         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1660         return 0;
1661 }
1662
1663 void *
1664 rte_cryptodev_sym_session_get_user_data(
1665                                         struct rte_cryptodev_sym_session *sess)
1666 {
1667         if (sess == NULL || sess->user_data_sz == 0)
1668                 return NULL;
1669
1670         return (void *)(sess->sess_data + sess->nb_drivers);
1671 }
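
/*
 * Usage sketch (illustrative): the user-data area only exists when the
 * session mempool was created with a non-zero user data size (e.g. via
 * rte_cryptodev_sym_session_pool_create()); otherwise the setter returns
 * -ENOMEM for any non-zero size and the getter returns NULL. The app_ctx
 * type is hypothetical.
 *
 *        struct app_ctx { uint64_t seq; } ctx = { .seq = 1 };
 *
 *        if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *                        sizeof(ctx)) == 0) {
 *                struct app_ctx *p =
 *                        rte_cryptodev_sym_session_get_user_data(sess);
 *                // p points at the copy stored after the driver session data
 *        }
 */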
1672
1673 static inline void
1674 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
1675 {
1676         uint32_t i;
1677         for (i = 0; i < vec->num; i++)
1678                 vec->status[i] = errnum;
1679 }
1680
1681 uint32_t
1682 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1683         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1684         struct rte_crypto_sym_vec *vec)
1685 {
1686         struct rte_cryptodev *dev;
1687
1688         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1689                 sym_crypto_fill_status(vec, EINVAL);
1690                 return 0;
1691         }
1692
1693         dev = rte_cryptodev_pmd_get_dev(dev_id);
1694
1695         if (*dev->dev_ops->sym_cpu_process == NULL ||
1696                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
1697                 sym_crypto_fill_status(vec, ENOTSUP);
1698                 return 0;
1699         }
1700
1701         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1702 }
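
/*
 * Call-site sketch (illustrative, error handling trimmed): CPU crypto
 * processing is only useful on devices advertising
 * RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO. The sess, ofs and vec variables are
 * assumed to have been prepared by the application beforehand.
 *
 *        struct rte_cryptodev_info info;
 *
 *        rte_cryptodev_info_get(dev_id, &info);
 *        if (info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO) {
 *                uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id,
 *                                sess, ofs, &vec);
 *                // per-element results are reported through vec.status[]
 *        }
 */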
1703
1704 /** Initialise rte_crypto_op mempool element */
1705 static void
1706 rte_crypto_op_init(struct rte_mempool *mempool,
1707                 void *opaque_arg,
1708                 void *_op_data,
1709                 __rte_unused unsigned i)
1710 {
1711         struct rte_crypto_op *op = _op_data;
1712         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1713
1714         memset(_op_data, 0, mempool->elt_size);
1715
1716         __rte_crypto_op_reset(op, type);
1717
1718         op->phys_addr = rte_mem_virt2iova(_op_data);
1719         op->mempool = mempool;
1720 }
1721
1722
1723 struct rte_mempool *
1724 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1725                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1726                 int socket_id)
1727 {
1728         struct rte_crypto_op_pool_private *priv;
1729
1730         unsigned elt_size = sizeof(struct rte_crypto_op) +
1731                         priv_size;
1732
1733         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1734                 elt_size += sizeof(struct rte_crypto_sym_op);
1735         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1736                 elt_size += sizeof(struct rte_crypto_asym_op);
1737         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1738                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1739                                     sizeof(struct rte_crypto_asym_op));
1740         } else {
1741                 CDEV_LOG_ERR("Invalid op_type");
1742                 return NULL;
1743         }
1744
1745         /* lookup mempool in case already allocated */
1746         struct rte_mempool *mp = rte_mempool_lookup(name);
1747
1748         if (mp != NULL) {
1749                 priv = (struct rte_crypto_op_pool_private *)
1750                                 rte_mempool_get_priv(mp);
1751
1752                 if (mp->elt_size != elt_size ||
1753                                 mp->cache_size < cache_size ||
1754                                 mp->size < nb_elts ||
1755                                 priv->priv_size < priv_size) {
1756                         mp = NULL;
1757                         CDEV_LOG_ERR("Mempool %s already exists but with "
1758                                         "incompatible parameters", name);
1759                         return NULL;
1760                 }
1761                 return mp;
1762         }
1763
1764         mp = rte_mempool_create(
1765                         name,
1766                         nb_elts,
1767                         elt_size,
1768                         cache_size,
1769                         sizeof(struct rte_crypto_op_pool_private),
1770                         NULL,
1771                         NULL,
1772                         rte_crypto_op_init,
1773                         &type,
1774                         socket_id,
1775                         0);
1776
1777         if (mp == NULL) {
1778                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1779                 return NULL;
1780         }
1781
1782         priv = (struct rte_crypto_op_pool_private *)
1783                         rte_mempool_get_priv(mp);
1784
1785         priv->priv_size = priv_size;
1786         priv->type = type;
1787
1788         return mp;
1789 }
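
/*
 * Usage sketch (illustrative names and sizes): create a symmetric op pool,
 * then allocate and release a single operation.
 *
 *        struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *                        "crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *                        8192, 128, 0, rte_socket_id());
 *
 *        struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *                        RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *        if (op != NULL) {
 *                // attach a session and fill the sym op, enqueue, then later:
 *                rte_crypto_op_free(op);
 *        }
 */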
1790
1791 int
1792 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1793 {
1794         struct rte_cryptodev *dev = NULL;
1795         uint32_t i = 0;
1796
1797         if (name == NULL)
1798                 return -EINVAL;
1799
1800         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1801                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1802                                 "%s_%u", dev_name_prefix, i);
1803
1804                 if (ret < 0)
1805                         return ret;
1806
1807                 dev = rte_cryptodev_pmd_get_named_dev(name);
1808                 if (!dev)
1809                         return 0;
1810         }
1811
1812         return -1;
1813 }
1814
1815 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
1816
1817 static struct cryptodev_driver_list cryptodev_driver_list =
1818         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1819
1820 int
1821 rte_cryptodev_driver_id_get(const char *name)
1822 {
1823         struct cryptodev_driver *driver;
1824         const char *driver_name;
1825
1826         if (name == NULL) {
1827                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
1828                 return -1;
1829         }
1830
1831         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1832                 driver_name = driver->driver->name;
1833                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
1834                         return driver->id;
1835         }
1836         return -1;
1837 }
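
/*
 * Lookup sketch: map a PMD name to its driver id and back to the canonical
 * driver name. "crypto_aesni_mb" is only an example of a name an application
 * might receive on its command line.
 *
 *        int id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *        if (id >= 0)
 *                printf("driver %s has id %d\n",
 *                                rte_cryptodev_driver_name_get((uint8_t)id), id);
 */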
1838
1839 const char *
1840 rte_cryptodev_name_get(uint8_t dev_id)
1841 {
1842         struct rte_cryptodev *dev;
1843
1844         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1845                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1846                 return NULL;
1847         }
1848
1849         dev = rte_cryptodev_pmd_get_dev(dev_id);
1850         if (dev == NULL)
1851                 return NULL;
1852
1853         return dev->data->name;
1854 }
1855
1856 const char *
1857 rte_cryptodev_driver_name_get(uint8_t driver_id)
1858 {
1859         struct cryptodev_driver *driver;
1860
1861         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1862                 if (driver->id == driver_id)
1863                         return driver->driver->name;
1864         return NULL;
1865 }
1866
1867 uint8_t
1868 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1869                 const struct rte_driver *drv)
1870 {
1871         crypto_drv->driver = drv;
1872         crypto_drv->id = nb_drivers;
1873
1874         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1875
1876         return nb_drivers++;
1877 }
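
/*
 * Note: PMDs normally do not call rte_cryptodev_allocate_driver() directly;
 * it is expected to be invoked from an init-time constructor generated by the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() macro in rte_cryptodev_pmd.h, which stores
 * the returned driver id in the PMD's driver-id variable.
 */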