/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"

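/* Number of crypto drivers registered so far; also used to assign the next
 * driver id in rte_cryptodev_allocate_driver().
 */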
static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * They can be used on an application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * They can be used on an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * They can be used on an application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * They can be used on an application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation string identifiers.
 * They can be used on an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform string identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool's private data
 * area.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * The crypto auth operation string identifiers.
 * They can be used on an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

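/*
 * Check that a parameter size lies within the [min, max] range advertised by
 * a capability and, when an increment is given, that it is one of the
 * discrete supported sizes.
 */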
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check against the limit if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that the given modlen is a multiple of
	 * the increment
	 */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	default:
		return NULL;
	}
}

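/* Return the device structure for the given device id; no validity check is
 * performed here, callers are expected to validate dev_id first.
 */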
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

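/* Return 1 if the shared device data for dev_id has been allocated,
 * 0 otherwise.
 */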
static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}


int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

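/* Reserve (primary process) or look up (secondary process) the memzone that
 * holds the shared rte_cryptodev_data for the given device id.
 */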
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

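/* Clear the shared device data pointer and, in the primary process, free the
 * backing memzone.
 */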
static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_memzone_free(mz);

	return 0;
}

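/* Find the first detached slot in the device array, or return
 * RTE_CRYPTO_MAX_DEVS if every slot is in use.
 */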
static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

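/* (Re)allocate the per-device queue pair array to hold nb_qpairs entries,
 * releasing any queue pairs that fall outside the new range on reconfigure.
 */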
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}


int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}


int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}


void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}


int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}


int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	sess->sess_data[index].refcnt++;
	return 0;
}

int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

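/*
 * A minimal usage sketch of the symmetric session API implemented below.
 * It is illustrative only; the pool names and sizes, the dev_id and the
 * "xform" built by the caller are placeholders, and error handling is
 * omitted:
 *
 *	struct rte_mempool *hdr_pool, *priv_pool;
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	hdr_pool = rte_cryptodev_sym_session_pool_create("sess_hdr_pool",
 *			1024, 0, 64, 0, rte_socket_id());
 *	priv_pool = rte_mempool_create("sess_priv_pool", 1024,
 *			rte_cryptodev_sym_get_private_session_size(dev_id),
 *			64, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 *
 *	sess = rte_cryptodev_sym_session_create(hdr_pool);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_pool);
 */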
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	return mp;
}

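/* Size of the per-driver session data array plus the user data area that
 * follows the session header.
 */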
static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear the per-driver session data and the user data area that
	 * follows it.
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	return sess;
}

struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear the per-driver session private data pointers and the flag
	 * which indicates presence of private data.
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all information needed to safely clear or free a
	 * session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

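/*
 * A minimal usage sketch for the op pool helper implemented below. It is
 * illustrative only; the pool name, sizes and the session "sess" are
 * placeholders, and error handling is omitted:
 *
 *	struct rte_mempool *op_pool;
 *	struct rte_crypto_op *op;
 *
 *	op_pool = rte_crypto_op_pool_create("crypto_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *	rte_crypto_op_attach_sym_session(op, sess);
 */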
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				    sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}

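/* Build a unique device name of the form "<prefix>_<N>", picking the lowest
 * index not already taken by an allocated crypto device.
 */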
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

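/* List of all registered crypto drivers, filled by
 * rte_cryptodev_allocate_driver().
 */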
TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}