cryptodev: add feature flag for non-byte aligned data
[dpdk.git] / lib / librte_cryptodev / rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43 #include "rte_cryptodev_trace.h"
44
45 static uint8_t nb_drivers;
46
47 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
48
49 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
50
51 static struct rte_cryptodev_global cryptodev_globals = {
52                 .devs                   = rte_crypto_devices,
53                 .data                   = { NULL },
54                 .nb_devs                = 0
55 };
56
57 /* spinlock for crypto device callbacks */
58 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
59
60
61 /**
62  * The user application callback description.
63  *
64  * It contains the callback address to be registered by the user application,
65  * the pointer to the callback parameters, and the event type.
66  */
67 struct rte_cryptodev_callback {
68         TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
69         rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
70         void *cb_arg;                           /**< Parameter for callback */
71         enum rte_cryptodev_event_type event;    /**< Interrupt event type */
72         uint32_t active;                        /**< Callback is executing */
73 };
74
75 /**
76  * The crypto cipher algorithm string identifiers.
77  * They can be used in an application command line.
78  */
79 const char *
80 rte_crypto_cipher_algorithm_strings[] = {
81         [RTE_CRYPTO_CIPHER_3DES_CBC]    = "3des-cbc",
82         [RTE_CRYPTO_CIPHER_3DES_ECB]    = "3des-ecb",
83         [RTE_CRYPTO_CIPHER_3DES_CTR]    = "3des-ctr",
84
85         [RTE_CRYPTO_CIPHER_AES_CBC]     = "aes-cbc",
86         [RTE_CRYPTO_CIPHER_AES_CTR]     = "aes-ctr",
87         [RTE_CRYPTO_CIPHER_AES_DOCSISBPI]       = "aes-docsisbpi",
88         [RTE_CRYPTO_CIPHER_AES_ECB]     = "aes-ecb",
89         [RTE_CRYPTO_CIPHER_AES_F8]      = "aes-f8",
90         [RTE_CRYPTO_CIPHER_AES_XTS]     = "aes-xts",
91
92         [RTE_CRYPTO_CIPHER_ARC4]        = "arc4",
93
94         [RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
95         [RTE_CRYPTO_CIPHER_DES_DOCSISBPI]       = "des-docsisbpi",
96
97         [RTE_CRYPTO_CIPHER_NULL]        = "null",
98
99         [RTE_CRYPTO_CIPHER_KASUMI_F8]   = "kasumi-f8",
100         [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
101         [RTE_CRYPTO_CIPHER_ZUC_EEA3]    = "zuc-eea3"
102 };
103
104 /**
105  * The crypto cipher operation string identifiers.
106  * They can be used in an application command line.
107  */
108 const char *
109 rte_crypto_cipher_operation_strings[] = {
110                 [RTE_CRYPTO_CIPHER_OP_ENCRYPT]  = "encrypt",
111                 [RTE_CRYPTO_CIPHER_OP_DECRYPT]  = "decrypt"
112 };
113
114 /**
115  * The crypto auth algorithm string identifiers.
116  * They can be used in an application command line.
117  */
118 const char *
119 rte_crypto_auth_algorithm_strings[] = {
120         [RTE_CRYPTO_AUTH_AES_CBC_MAC]   = "aes-cbc-mac",
121         [RTE_CRYPTO_AUTH_AES_CMAC]      = "aes-cmac",
122         [RTE_CRYPTO_AUTH_AES_GMAC]      = "aes-gmac",
123         [RTE_CRYPTO_AUTH_AES_XCBC_MAC]  = "aes-xcbc-mac",
124
125         [RTE_CRYPTO_AUTH_MD5]           = "md5",
126         [RTE_CRYPTO_AUTH_MD5_HMAC]      = "md5-hmac",
127
128         [RTE_CRYPTO_AUTH_NULL]          = "null",
129
130         [RTE_CRYPTO_AUTH_SHA1]          = "sha1",
131         [RTE_CRYPTO_AUTH_SHA1_HMAC]     = "sha1-hmac",
132
133         [RTE_CRYPTO_AUTH_SHA224]        = "sha2-224",
134         [RTE_CRYPTO_AUTH_SHA224_HMAC]   = "sha2-224-hmac",
135         [RTE_CRYPTO_AUTH_SHA256]        = "sha2-256",
136         [RTE_CRYPTO_AUTH_SHA256_HMAC]   = "sha2-256-hmac",
137         [RTE_CRYPTO_AUTH_SHA384]        = "sha2-384",
138         [RTE_CRYPTO_AUTH_SHA384_HMAC]   = "sha2-384-hmac",
139         [RTE_CRYPTO_AUTH_SHA512]        = "sha2-512",
140         [RTE_CRYPTO_AUTH_SHA512_HMAC]   = "sha2-512-hmac",
141
142         [RTE_CRYPTO_AUTH_KASUMI_F9]     = "kasumi-f9",
143         [RTE_CRYPTO_AUTH_SNOW3G_UIA2]   = "snow3g-uia2",
144         [RTE_CRYPTO_AUTH_ZUC_EIA3]      = "zuc-eia3"
145 };
146
147 /**
148  * The crypto AEAD algorithm string identifiers.
149  * They can be used in an application command line.
150  */
151 const char *
152 rte_crypto_aead_algorithm_strings[] = {
153         [RTE_CRYPTO_AEAD_AES_CCM]       = "aes-ccm",
154         [RTE_CRYPTO_AEAD_AES_GCM]       = "aes-gcm",
155 };
156
157 /**
158  * The crypto AEAD operation string identifiers.
159  * They can be used in an application command line.
160  */
161 const char *
162 rte_crypto_aead_operation_strings[] = {
163         [RTE_CRYPTO_AEAD_OP_ENCRYPT]    = "encrypt",
164         [RTE_CRYPTO_AEAD_OP_DECRYPT]    = "decrypt"
165 };
166
167 /**
168  * Asymmetric crypto transform operation strings identifiers.
169  */
170 const char *rte_crypto_asym_xform_strings[] = {
171         [RTE_CRYPTO_ASYM_XFORM_NONE]    = "none",
172         [RTE_CRYPTO_ASYM_XFORM_RSA]     = "rsa",
173         [RTE_CRYPTO_ASYM_XFORM_MODEX]   = "modexp",
174         [RTE_CRYPTO_ASYM_XFORM_MODINV]  = "modinv",
175         [RTE_CRYPTO_ASYM_XFORM_DH]      = "dh",
176         [RTE_CRYPTO_ASYM_XFORM_DSA]     = "dsa",
177         [RTE_CRYPTO_ASYM_XFORM_ECDSA]   = "ecdsa",
178         [RTE_CRYPTO_ASYM_XFORM_ECPM]    = "ecpm",
179 };
180
181 /**
182  * Asymmetric crypto operation strings identifiers.
183  */
184 const char *rte_crypto_asym_op_strings[] = {
185         [RTE_CRYPTO_ASYM_OP_ENCRYPT]    = "encrypt",
186         [RTE_CRYPTO_ASYM_OP_DECRYPT]    = "decrypt",
187         [RTE_CRYPTO_ASYM_OP_SIGN]       = "sign",
188         [RTE_CRYPTO_ASYM_OP_VERIFY]     = "verify",
189         [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]       = "priv_key_generate",
190         [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
191         [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
192 };
193
194 /**
195  * The private data structure stored in the session mempool's private data.
196  */
197 struct rte_cryptodev_sym_session_pool_private_data {
198         uint16_t nb_drivers;
199         /**< number of elements in sess_data array */
200         uint16_t user_data_sz;
201         /**< session user data will be placed after sess_data */
202 };
203
204 int
205 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
206                 const char *algo_string)
207 {
208         unsigned int i;
209
210         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
211                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
212                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
213                         return 0;
214                 }
215         }
216
217         /* Invalid string */
218         return -1;
219 }
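/*
 * Usage sketch (illustrative only, not part of the upstream file): parsing a
 * cipher algorithm name supplied on an application command line. The
 * "aes-cbc" string is an assumed input.
 *
 *        enum rte_crypto_cipher_algorithm cipher_algo;
 *
 *        if (rte_cryptodev_get_cipher_algo_enum(&cipher_algo, "aes-cbc") != 0)
 *                printf("unknown cipher algorithm\n");
 */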
220
221 int
222 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
223                 const char *algo_string)
224 {
225         unsigned int i;
226
227         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
228                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
229                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
230                         return 0;
231                 }
232         }
233
234         /* Invalid string */
235         return -1;
236 }
237
238 int
239 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
240                 const char *algo_string)
241 {
242         unsigned int i;
243
244         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
245                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
246                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
247                         return 0;
248                 }
249         }
250
251         /* Invalid string */
252         return -1;
253 }
254
255 int
256 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
257                 const char *xform_string)
258 {
259         unsigned int i;
260
261         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
262                 if (strcmp(xform_string,
263                         rte_crypto_asym_xform_strings[i]) == 0) {
264                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
265                         return 0;
266                 }
267         }
268
269         /* Invalid string */
270         return -1;
271 }
272
273 /**
274  * The crypto auth operation string identifiers.
275  * They can be used in an application command line.
276  */
277 const char *
278 rte_crypto_auth_operation_strings[] = {
279                 [RTE_CRYPTO_AUTH_OP_VERIFY]     = "verify",
280                 [RTE_CRYPTO_AUTH_OP_GENERATE]   = "generate"
281 };
282
283 const struct rte_cryptodev_symmetric_capability *
284 rte_cryptodev_sym_capability_get(uint8_t dev_id,
285                 const struct rte_cryptodev_sym_capability_idx *idx)
286 {
287         const struct rte_cryptodev_capabilities *capability;
288         struct rte_cryptodev_info dev_info;
289         int i = 0;
290
291         rte_cryptodev_info_get(dev_id, &dev_info);
292
293         while ((capability = &dev_info.capabilities[i++])->op !=
294                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
295                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
296                         continue;
297
298                 if (capability->sym.xform_type != idx->type)
299                         continue;
300
301                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
302                         capability->sym.auth.algo == idx->algo.auth)
303                         return &capability->sym;
304
305                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
306                         capability->sym.cipher.algo == idx->algo.cipher)
307                         return &capability->sym;
308
309                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
310                                 capability->sym.aead.algo == idx->algo.aead)
311                         return &capability->sym;
312         }
313
314         return NULL;
315
316 }
317
318 static int
319 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
320 {
321         unsigned int next_size;
322
323         /* Check lower/upper bounds */
324         if (size < range->min)
325                 return -1;
326
327         if (size > range->max)
328                 return -1;
329
330         /* If range is actually only one value, size is correct */
331         if (range->increment == 0)
332                 return 0;
333
334         /* Check if value is one of the supported sizes */
335         for (next_size = range->min; next_size <= range->max;
336                         next_size += range->increment)
337                 if (size == next_size)
338                         return 0;
339
340         return -1;
341 }
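/*
 * Worked example of the range semantics above (illustrative only): for a key
 * size range of { .min = 16, .max = 32, .increment = 8 }, the sizes 16, 24
 * and 32 pass the check while 20 is rejected. With .increment = 0 any size
 * within [min, max] passes, which is normally used when min == max describes
 * a single supported size.
 */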
342
343 const struct rte_cryptodev_asymmetric_xform_capability *
344 rte_cryptodev_asym_capability_get(uint8_t dev_id,
345                 const struct rte_cryptodev_asym_capability_idx *idx)
346 {
347         const struct rte_cryptodev_capabilities *capability;
348         struct rte_cryptodev_info dev_info;
349         unsigned int i = 0;
350
351         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
352         rte_cryptodev_info_get(dev_id, &dev_info);
353
354         while ((capability = &dev_info.capabilities[i++])->op !=
355                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
356                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
357                         continue;
358
359                 if (capability->asym.xform_capa.xform_type == idx->type)
360                         return &capability->asym.xform_capa;
361         }
362         return NULL;
363 };
364
365 int
366 rte_cryptodev_sym_capability_check_cipher(
367                 const struct rte_cryptodev_symmetric_capability *capability,
368                 uint16_t key_size, uint16_t iv_size)
369 {
370         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
371                 return -1;
372
373         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
374                 return -1;
375
376         return 0;
377 }
378
379 int
380 rte_cryptodev_sym_capability_check_auth(
381                 const struct rte_cryptodev_symmetric_capability *capability,
382                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
383 {
384         if (param_range_check(key_size, &capability->auth.key_size) != 0)
385                 return -1;
386
387         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
388                 return -1;
389
390         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
391                 return -1;
392
393         return 0;
394 }
395
396 int
397 rte_cryptodev_sym_capability_check_aead(
398                 const struct rte_cryptodev_symmetric_capability *capability,
399                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
400                 uint16_t iv_size)
401 {
402         if (param_range_check(key_size, &capability->aead.key_size) != 0)
403                 return -1;
404
405         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
406                 return -1;
407
408         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
409                 return -1;
410
411         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
412                 return -1;
413
414         return 0;
415 }
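
/*
 * Usage sketch (illustrative only): checking whether a device supports
 * AES-CBC with a 128-bit key and a 16-byte IV. Device id 0 and the chosen
 * sizes are assumptions for the example.
 *
 *        struct rte_cryptodev_sym_capability_idx cap_idx = {
 *                .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *                .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *        };
 *        const struct rte_cryptodev_symmetric_capability *cap;
 *
 *        cap = rte_cryptodev_sym_capability_get(0, &cap_idx);
 *        if (cap != NULL &&
 *            rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *                printf("AES-CBC with 128-bit key is supported\n");
 */
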
416 int
417 rte_cryptodev_asym_xform_capability_check_optype(
418         const struct rte_cryptodev_asymmetric_xform_capability *capability,
419         enum rte_crypto_asym_op_type op_type)
420 {
421         if (capability->op_types & (1 << op_type))
422                 return 1;
423
424         return 0;
425 }
426
427 int
428 rte_cryptodev_asym_xform_capability_check_modlen(
429         const struct rte_cryptodev_asymmetric_xform_capability *capability,
430         uint16_t modlen)
431 {
432         /* no need to check the limits if min or max is 0 */
433         if (capability->modlen.min != 0) {
434                 if (modlen < capability->modlen.min)
435                         return -1;
436         }
437
438         if (capability->modlen.max != 0) {
439                 if (modlen > capability->modlen.max)
440                         return -1;
441         }
442
443         /* in any case, check if modlen is a multiple of the increment */
444         if (capability->modlen.increment != 0) {
445                 if (modlen % (capability->modlen.increment))
446                         return -1;
447         }
448
449         return 0;
450 }
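/*
 * Usage sketch (illustrative only): checking that a device can perform RSA
 * sign operations with a 2048-bit (256-byte) modulus. Device id 0 is an
 * assumption for the example.
 *
 *        struct rte_cryptodev_asym_capability_idx asym_idx = {
 *                .type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *        };
 *        const struct rte_cryptodev_asymmetric_xform_capability *asym_cap;
 *
 *        asym_cap = rte_cryptodev_asym_capability_get(0, &asym_idx);
 *        if (asym_cap != NULL &&
 *            rte_cryptodev_asym_xform_capability_check_optype(asym_cap,
 *                        RTE_CRYPTO_ASYM_OP_SIGN) &&
 *            rte_cryptodev_asym_xform_capability_check_modlen(asym_cap,
 *                        256) == 0)
 *                printf("RSA sign with 2048-bit modulus is supported\n");
 */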
451
452
453 const char *
454 rte_cryptodev_get_feature_name(uint64_t flag)
455 {
456         switch (flag) {
457         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
458                 return "SYMMETRIC_CRYPTO";
459         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
460                 return "ASYMMETRIC_CRYPTO";
461         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
462                 return "SYM_OPERATION_CHAINING";
463         case RTE_CRYPTODEV_FF_CPU_SSE:
464                 return "CPU_SSE";
465         case RTE_CRYPTODEV_FF_CPU_AVX:
466                 return "CPU_AVX";
467         case RTE_CRYPTODEV_FF_CPU_AVX2:
468                 return "CPU_AVX2";
469         case RTE_CRYPTODEV_FF_CPU_AVX512:
470                 return "CPU_AVX512";
471         case RTE_CRYPTODEV_FF_CPU_AESNI:
472                 return "CPU_AESNI";
473         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
474                 return "HW_ACCELERATED";
475         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
476                 return "IN_PLACE_SGL";
477         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
478                 return "OOP_SGL_IN_SGL_OUT";
479         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
480                 return "OOP_SGL_IN_LB_OUT";
481         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
482                 return "OOP_LB_IN_SGL_OUT";
483         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
484                 return "OOP_LB_IN_LB_OUT";
485         case RTE_CRYPTODEV_FF_CPU_NEON:
486                 return "CPU_NEON";
487         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
488                 return "CPU_ARM_CE";
489         case RTE_CRYPTODEV_FF_SECURITY:
490                 return "SECURITY_PROTOCOL";
491         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
492                 return "RSA_PRIV_OP_KEY_EXP";
493         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
494                 return "RSA_PRIV_OP_KEY_QT";
495         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
496                 return "DIGEST_ENCRYPTED";
497         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
498                 return "SYM_CPU_CRYPTO";
499         case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
500                 return "ASYM_SESSIONLESS";
501         case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
502                 return "SYM_SESSIONLESS";
503         case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
504                 return "NON_BYTE_ALIGNED_DATA";
505         default:
506                 return NULL;
507         }
508 }
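/*
 * Usage sketch (illustrative only): printing the feature flags of a device as
 * human readable names. Device id 0 is an assumption for the example.
 *
 *        struct rte_cryptodev_info info;
 *        const char *name;
 *        uint64_t flag;
 *
 *        rte_cryptodev_info_get(0, &info);
 *        for (flag = 1; flag != 0; flag <<= 1) {
 *                if ((info.feature_flags & flag) == 0)
 *                        continue;
 *                name = rte_cryptodev_get_feature_name(flag);
 *                if (name != NULL)
 *                        printf("%s\n", name);
 *        }
 */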
509
510 struct rte_cryptodev *
511 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
512 {
513         return &cryptodev_globals.devs[dev_id];
514 }
515
516 struct rte_cryptodev *
517 rte_cryptodev_pmd_get_named_dev(const char *name)
518 {
519         struct rte_cryptodev *dev;
520         unsigned int i;
521
522         if (name == NULL)
523                 return NULL;
524
525         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
526                 dev = &cryptodev_globals.devs[i];
527
528                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
529                                 (strcmp(dev->data->name, name) == 0))
530                         return dev;
531         }
532
533         return NULL;
534 }
535
536 static inline uint8_t
537 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
538 {
539         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
540                         rte_crypto_devices[dev_id].data == NULL)
541                 return 0;
542
543         return 1;
544 }
545
546 unsigned int
547 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
548 {
549         struct rte_cryptodev *dev = NULL;
550
551         if (!rte_cryptodev_is_valid_device_data(dev_id))
552                 return 0;
553
554         dev = rte_cryptodev_pmd_get_dev(dev_id);
555         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
556                 return 0;
557         else
558                 return 1;
559 }
560
561
562 int
563 rte_cryptodev_get_dev_id(const char *name)
564 {
565         unsigned i;
566
567         if (name == NULL)
568                 return -1;
569
570         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
571                 if (!rte_cryptodev_is_valid_device_data(i))
572                         continue;
573                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
574                                 == 0) &&
575                                 (cryptodev_globals.devs[i].attached ==
576                                                 RTE_CRYPTODEV_ATTACHED))
577                         return i;
578         }
579
580         return -1;
581 }
582
583 uint8_t
584 rte_cryptodev_count(void)
585 {
586         return cryptodev_globals.nb_devs;
587 }
588
589 uint8_t
590 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
591 {
592         uint8_t i, dev_count = 0;
593
594         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
595                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
596                         cryptodev_globals.devs[i].attached ==
597                                         RTE_CRYPTODEV_ATTACHED)
598                         dev_count++;
599
600         return dev_count;
601 }
602
603 uint8_t
604 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
605         uint8_t nb_devices)
606 {
607         uint8_t i, count = 0;
608         struct rte_cryptodev *devs = cryptodev_globals.devs;
609
610         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
611                 if (!rte_cryptodev_is_valid_device_data(i))
612                         continue;
613
614                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
615                         int cmp;
616
617                         cmp = strncmp(devs[i].device->driver->name,
618                                         driver_name,
619                                         strlen(driver_name) + 1);
620
621                         if (cmp == 0)
622                                 devices[count++] = devs[i].data->dev_id;
623                 }
624         }
625
626         return count;
627 }
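/*
 * Usage sketch (illustrative only): collecting the identifiers of all
 * attached devices bound to one driver. The driver name "crypto_aesni_mb"
 * is an assumption for the example.
 *
 *        uint8_t dev_ids[RTE_CRYPTO_MAX_DEVS];
 *        uint8_t nb, j;
 *
 *        nb = rte_cryptodev_devices_get("crypto_aesni_mb", dev_ids,
 *                        RTE_CRYPTO_MAX_DEVS);
 *        for (j = 0; j < nb; j++)
 *                printf("dev %u on socket %d\n", dev_ids[j],
 *                                rte_cryptodev_socket_id(dev_ids[j]));
 */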
628
629 void *
630 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
631 {
632         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
633                         (rte_crypto_devices[dev_id].feature_flags &
634                         RTE_CRYPTODEV_FF_SECURITY))
635                 return rte_crypto_devices[dev_id].security_ctx;
636
637         return NULL;
638 }
639
640 int
641 rte_cryptodev_socket_id(uint8_t dev_id)
642 {
643         struct rte_cryptodev *dev;
644
645         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
646                 return -1;
647
648         dev = rte_cryptodev_pmd_get_dev(dev_id);
649
650         return dev->data->socket_id;
651 }
652
653 static inline int
654 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
655                 int socket_id)
656 {
657         char mz_name[RTE_MEMZONE_NAMESIZE];
658         const struct rte_memzone *mz;
659         int n;
660
661         /* generate memzone name */
662         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
663         if (n >= (int)sizeof(mz_name))
664                 return -EINVAL;
665
666         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
667                 mz = rte_memzone_reserve(mz_name,
668                                 sizeof(struct rte_cryptodev_data),
669                                 socket_id, 0);
670         } else
671                 mz = rte_memzone_lookup(mz_name);
672
673         if (mz == NULL)
674                 return -ENOMEM;
675
676         *data = mz->addr;
677         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
678                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
679
680         return 0;
681 }
682
683 static inline int
684 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
685 {
686         char mz_name[RTE_MEMZONE_NAMESIZE];
687         const struct rte_memzone *mz;
688         int n;
689
690         /* generate memzone name */
691         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
692         if (n >= (int)sizeof(mz_name))
693                 return -EINVAL;
694
695         mz = rte_memzone_lookup(mz_name);
696         if (mz == NULL)
697                 return -ENOMEM;
698
699         RTE_ASSERT(*data == mz->addr);
700         *data = NULL;
701
702         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
703                 return rte_memzone_free(mz);
704
705         return 0;
706 }
707
708 static uint8_t
709 rte_cryptodev_find_free_device_index(void)
710 {
711         uint8_t dev_id;
712
713         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
714                 if (rte_crypto_devices[dev_id].attached ==
715                                 RTE_CRYPTODEV_DETACHED)
716                         return dev_id;
717         }
718         return RTE_CRYPTO_MAX_DEVS;
719 }
720
721 struct rte_cryptodev *
722 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
723 {
724         struct rte_cryptodev *cryptodev;
725         uint8_t dev_id;
726
727         if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
728                 CDEV_LOG_ERR("Crypto device with name %s already "
729                                 "allocated!", name);
730                 return NULL;
731         }
732
733         dev_id = rte_cryptodev_find_free_device_index();
734         if (dev_id == RTE_CRYPTO_MAX_DEVS) {
735                 CDEV_LOG_ERR("Reached maximum number of crypto devices");
736                 return NULL;
737         }
738
739         cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
740
741         if (cryptodev->data == NULL) {
742                 struct rte_cryptodev_data **cryptodev_data =
743                                 &cryptodev_globals.data[dev_id];
744
745                 int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
746                                 socket_id);
747
748                 if (retval < 0 || *cryptodev_data == NULL)
749                         return NULL;
750
751                 cryptodev->data = *cryptodev_data;
752
753                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
754                         strlcpy(cryptodev->data->name, name,
755                                 RTE_CRYPTODEV_NAME_MAX_LEN);
756
757                         cryptodev->data->dev_id = dev_id;
758                         cryptodev->data->socket_id = socket_id;
759                         cryptodev->data->dev_started = 0;
760                 }
761
762                 /* init user callbacks */
763                 TAILQ_INIT(&(cryptodev->link_intr_cbs));
764
765                 cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
766
767                 cryptodev_globals.nb_devs++;
768         }
769
770         return cryptodev;
771 }
772
773 int
774 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
775 {
776         int ret;
777         uint8_t dev_id;
778
779         if (cryptodev == NULL)
780                 return -EINVAL;
781
782         dev_id = cryptodev->data->dev_id;
783
784         /* Close device only if device operations have been set */
785         if (cryptodev->dev_ops) {
786                 ret = rte_cryptodev_close(dev_id);
787                 if (ret < 0)
788                         return ret;
789         }
790
791         ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
792         if (ret < 0)
793                 return ret;
794
795         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
796         cryptodev_globals.nb_devs--;
797         return 0;
798 }
799
800 uint16_t
801 rte_cryptodev_queue_pair_count(uint8_t dev_id)
802 {
803         struct rte_cryptodev *dev;
804
805         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
806                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
807                 return 0;
808         }
809
810         dev = &rte_crypto_devices[dev_id];
811         return dev->data->nb_queue_pairs;
812 }
813
814 static int
815 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
816                 int socket_id)
817 {
818         struct rte_cryptodev_info dev_info;
819         void **qp;
820         unsigned i;
821
822         if ((dev == NULL) || (nb_qpairs < 1)) {
823                 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
824                                                         dev, nb_qpairs);
825                 return -EINVAL;
826         }
827
828         CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
829                         nb_qpairs, dev->data->dev_id);
830
831         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
832
833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
834         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
835
836         if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
837                 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
838                                 nb_qpairs, dev->data->dev_id);
839                 return -EINVAL;
840         }
841
842         if (dev->data->queue_pairs == NULL) { /* first time configuration */
843                 dev->data->queue_pairs = rte_zmalloc_socket(
844                                 "cryptodev->queue_pairs",
845                                 sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
846                                 RTE_CACHE_LINE_SIZE, socket_id);
847
848                 if (dev->data->queue_pairs == NULL) {
849                         dev->data->nb_queue_pairs = 0;
850                         CDEV_LOG_ERR("failed to get memory for qp meta data, "
851                                                         "nb_queues %u",
852                                                         nb_qpairs);
853                         return -(ENOMEM);
854                 }
855         } else { /* re-configure */
856                 int ret;
857                 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
858
859                 qp = dev->data->queue_pairs;
860
861                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
862                                 -ENOTSUP);
863
864                 for (i = nb_qpairs; i < old_nb_queues; i++) {
865                         ret = (*dev->dev_ops->queue_pair_release)(dev, i);
866                         if (ret < 0)
867                                 return ret;
868                 }
869
870                 qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
871                                 RTE_CACHE_LINE_SIZE);
872                 if (qp == NULL) {
873                         CDEV_LOG_ERR("failed to realloc qp meta data,"
874                                                 " nb_queues %u", nb_qpairs);
875                         return -(ENOMEM);
876                 }
877
878                 if (nb_qpairs > old_nb_queues) {
879                         uint16_t new_qs = nb_qpairs - old_nb_queues;
880
881                         memset(qp + old_nb_queues, 0,
882                                 sizeof(qp[0]) * new_qs);
883                 }
884
885                 dev->data->queue_pairs = qp;
886
887         }
888         dev->data->nb_queue_pairs = nb_qpairs;
889         return 0;
890 }
891
892 int
893 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
894 {
895         struct rte_cryptodev *dev;
896         int diag;
897
898         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
899                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
900                 return -EINVAL;
901         }
902
903         dev = &rte_crypto_devices[dev_id];
904
905         if (dev->data->dev_started) {
906                 CDEV_LOG_ERR(
907                     "device %d must be stopped to allow configuration", dev_id);
908                 return -EBUSY;
909         }
910
911         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
912
913         /* Setup new number of queue pairs and reconfigure device. */
914         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
915                         config->socket_id);
916         if (diag != 0) {
917                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
918                                 dev_id, diag);
919                 return diag;
920         }
921
922         rte_cryptodev_trace_configure(dev_id, config);
923         return (*dev->dev_ops->dev_configure)(dev, config);
924 }
925
926
927 int
928 rte_cryptodev_start(uint8_t dev_id)
929 {
930         struct rte_cryptodev *dev;
931         int diag;
932
933         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
934
935         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
936                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
937                 return -EINVAL;
938         }
939
940         dev = &rte_crypto_devices[dev_id];
941
942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
943
944         if (dev->data->dev_started != 0) {
945                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
946                         dev_id);
947                 return 0;
948         }
949
950         diag = (*dev->dev_ops->dev_start)(dev);
951         rte_cryptodev_trace_start(dev_id, diag);
952         if (diag == 0)
953                 dev->data->dev_started = 1;
954         else
955                 return diag;
956
957         return 0;
958 }
959
960 void
961 rte_cryptodev_stop(uint8_t dev_id)
962 {
963         struct rte_cryptodev *dev;
964
965         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
966                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
967                 return;
968         }
969
970         dev = &rte_crypto_devices[dev_id];
971
972         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
973
974         if (dev->data->dev_started == 0) {
975                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
976                         dev_id);
977                 return;
978         }
979
980         (*dev->dev_ops->dev_stop)(dev);
981         rte_cryptodev_trace_stop(dev_id);
982         dev->data->dev_started = 0;
983 }
984
985 int
986 rte_cryptodev_close(uint8_t dev_id)
987 {
988         struct rte_cryptodev *dev;
989         int retval;
990
991         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
992                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
993                 return -1;
994         }
995
996         dev = &rte_crypto_devices[dev_id];
997
998         /* Device must be stopped before it can be closed */
999         if (dev->data->dev_started == 1) {
1000                 CDEV_LOG_ERR("Device %u must be stopped before closing",
1001                                 dev_id);
1002                 return -EBUSY;
1003         }
1004
1005         /* We can't close the device if there are outstanding sessions in use */
1006         if (dev->data->session_pool != NULL) {
1007                 if (!rte_mempool_full(dev->data->session_pool)) {
1008                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1009                                         "has sessions still in use, free "
1010                                         "all sessions before calling close",
1011                                         (unsigned)dev_id);
1012                         return -EBUSY;
1013                 }
1014         }
1015
1016         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1017         retval = (*dev->dev_ops->dev_close)(dev);
1018         rte_cryptodev_trace_close(dev_id, retval);
1019
1020         if (retval < 0)
1021                 return retval;
1022
1023         return 0;
1024 }
1025
1026 int
1027 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1028                 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1029
1030 {
1031         struct rte_cryptodev *dev;
1032
1033         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1034                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1035                 return -EINVAL;
1036         }
1037
1038         dev = &rte_crypto_devices[dev_id];
1039         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1040                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1041                 return -EINVAL;
1042         }
1043
1044         if (!qp_conf) {
1045                 CDEV_LOG_ERR("qp_conf cannot be NULL\n");
1046                 return -EINVAL;
1047         }
1048
1049         if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1050                         (!qp_conf->mp_session && qp_conf->mp_session_private)) {
1051                 CDEV_LOG_ERR("Invalid mempools\n");
1052                 return -EINVAL;
1053         }
1054
1055         if (qp_conf->mp_session) {
1056                 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1057                 uint32_t obj_size = qp_conf->mp_session->elt_size;
1058                 uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1059                 struct rte_cryptodev_sym_session s = {0};
1060
1061                 pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1062                 if (!pool_priv || qp_conf->mp_session->private_data_size <
1063                                 sizeof(*pool_priv)) {
1064                         CDEV_LOG_ERR("Invalid mempool\n");
1065                         return -EINVAL;
1066                 }
1067
1068                 s.nb_drivers = pool_priv->nb_drivers;
1069                 s.user_data_sz = pool_priv->user_data_sz;
1070
1071                 if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1072                         obj_size) || (s.nb_drivers <= dev->driver_id) ||
1073                         rte_cryptodev_sym_get_private_session_size(dev_id) >
1074                                 obj_priv_size) {
1075                         CDEV_LOG_ERR("Invalid mempool\n");
1076                         return -EINVAL;
1077                 }
1078         }
1079
1080         if (dev->data->dev_started) {
1081                 CDEV_LOG_ERR(
1082                     "device %d must be stopped to allow configuration", dev_id);
1083                 return -EBUSY;
1084         }
1085
1086         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1087
1088         rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1089         return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1090                         socket_id);
1091 }
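/*
 * Usage sketch (illustrative only): typical bring-up sequence combining
 * rte_cryptodev_configure(), rte_cryptodev_queue_pair_setup() and
 * rte_cryptodev_start(). The session mempools (sess_mp, sess_priv_mp), the
 * descriptor count and device id 0 are assumptions for the example.
 *
 *        struct rte_cryptodev_config conf = {
 *                .socket_id = rte_socket_id(),
 *                .nb_queue_pairs = 1,
 *        };
 *        struct rte_cryptodev_qp_conf qp_conf = {
 *                .nb_descriptors = 2048,
 *                .mp_session = sess_mp,
 *                .mp_session_private = sess_priv_mp,
 *        };
 *
 *        if (rte_cryptodev_configure(0, &conf) < 0 ||
 *            rte_cryptodev_queue_pair_setup(0, 0, &qp_conf,
 *                        rte_socket_id()) < 0 ||
 *            rte_cryptodev_start(0) < 0)
 *                rte_exit(EXIT_FAILURE, "cryptodev 0 setup failed\n");
 */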
1092
1093
1094 int
1095 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1096 {
1097         struct rte_cryptodev *dev;
1098
1099         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1100                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1101                 return -ENODEV;
1102         }
1103
1104         if (stats == NULL) {
1105                 CDEV_LOG_ERR("Invalid stats ptr");
1106                 return -EINVAL;
1107         }
1108
1109         dev = &rte_crypto_devices[dev_id];
1110         memset(stats, 0, sizeof(*stats));
1111
1112         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1113         (*dev->dev_ops->stats_get)(dev, stats);
1114         return 0;
1115 }
1116
1117 void
1118 rte_cryptodev_stats_reset(uint8_t dev_id)
1119 {
1120         struct rte_cryptodev *dev;
1121
1122         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1123                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1124                 return;
1125         }
1126
1127         dev = &rte_crypto_devices[dev_id];
1128
1129         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1130         (*dev->dev_ops->stats_reset)(dev);
1131 }
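/*
 * Usage sketch (illustrative only): reading and then clearing the basic
 * counters of a device. Device id 0 is an assumption for the example.
 *
 *        struct rte_cryptodev_stats stats;
 *
 *        if (rte_cryptodev_stats_get(0, &stats) == 0)
 *                printf("enqueued %"PRIu64", dequeued %"PRIu64
 *                        ", enqueue errors %"PRIu64"\n",
 *                        stats.enqueued_count, stats.dequeued_count,
 *                        stats.enqueue_err_count);
 *        rte_cryptodev_stats_reset(0);
 */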
1132
1133
1134 void
1135 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1136 {
1137         struct rte_cryptodev *dev;
1138
1139         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1140                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1141                 return;
1142         }
1143
1144         dev = &rte_crypto_devices[dev_id];
1145
1146         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1147
1148         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1149         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1150
1151         dev_info->driver_name = dev->device->driver->name;
1152         dev_info->device = dev->device;
1153 }
1154
1155
1156 int
1157 rte_cryptodev_callback_register(uint8_t dev_id,
1158                         enum rte_cryptodev_event_type event,
1159                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1160 {
1161         struct rte_cryptodev *dev;
1162         struct rte_cryptodev_callback *user_cb;
1163
1164         if (!cb_fn)
1165                 return -EINVAL;
1166
1167         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1168                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1169                 return -EINVAL;
1170         }
1171
1172         dev = &rte_crypto_devices[dev_id];
1173         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1174
1175         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1176                 if (user_cb->cb_fn == cb_fn &&
1177                         user_cb->cb_arg == cb_arg &&
1178                         user_cb->event == event) {
1179                         break;
1180                 }
1181         }
1182
1183         /* create a new callback. */
1184         if (user_cb == NULL) {
1185                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1186                                 sizeof(struct rte_cryptodev_callback), 0);
1187                 if (user_cb != NULL) {
1188                         user_cb->cb_fn = cb_fn;
1189                         user_cb->cb_arg = cb_arg;
1190                         user_cb->event = event;
1191                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1192                 }
1193         }
1194
1195         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1196         return (user_cb == NULL) ? -ENOMEM : 0;
1197 }
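/*
 * Usage sketch (illustrative only): registering a callback that
 * rte_cryptodev_pmd_callback_process() will invoke when the PMD raises an
 * error event. The callback body is an assumption for the example.
 *
 *        static void
 *        crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *                        void *cb_arg)
 *        {
 *                RTE_SET_USED(cb_arg);
 *                printf("event %d on crypto device %u\n", event, dev_id);
 *        }
 *
 *        ...
 *
 *        rte_cryptodev_callback_register(0, RTE_CRYPTODEV_EVENT_ERROR,
 *                        crypto_event_cb, NULL);
 */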
1198
1199 int
1200 rte_cryptodev_callback_unregister(uint8_t dev_id,
1201                         enum rte_cryptodev_event_type event,
1202                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1203 {
1204         int ret;
1205         struct rte_cryptodev *dev;
1206         struct rte_cryptodev_callback *cb, *next;
1207
1208         if (!cb_fn)
1209                 return -EINVAL;
1210
1211         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1212                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1213                 return -EINVAL;
1214         }
1215
1216         dev = &rte_crypto_devices[dev_id];
1217         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1218
1219         ret = 0;
1220         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1221
1222                 next = TAILQ_NEXT(cb, next);
1223
1224                 if (cb->cb_fn != cb_fn || cb->event != event ||
1225                                 (cb->cb_arg != (void *)-1 &&
1226                                 cb->cb_arg != cb_arg))
1227                         continue;
1228
1229                 /*
1230                  * if this callback is not executing right now,
1231                  * then remove it.
1232                  */
1233                 if (cb->active == 0) {
1234                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1235                         rte_free(cb);
1236                 } else {
1237                         ret = -EAGAIN;
1238                 }
1239         }
1240
1241         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1242         return ret;
1243 }
1244
1245 void
1246 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1247         enum rte_cryptodev_event_type event)
1248 {
1249         struct rte_cryptodev_callback *cb_lst;
1250         struct rte_cryptodev_callback dev_cb;
1251
1252         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1253         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1254                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1255                         continue;
1256                 dev_cb = *cb_lst;
1257                 cb_lst->active = 1;
1258                 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1259                 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1260                                                 dev_cb.cb_arg);
1261                 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1262                 cb_lst->active = 0;
1263         }
1264         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1265 }
1266
1267
1268 int
1269 rte_cryptodev_sym_session_init(uint8_t dev_id,
1270                 struct rte_cryptodev_sym_session *sess,
1271                 struct rte_crypto_sym_xform *xforms,
1272                 struct rte_mempool *mp)
1273 {
1274         struct rte_cryptodev *dev;
1275         uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1276                         dev_id);
1277         uint8_t index;
1278         int ret;
1279
1280         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1281                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1282                 return -EINVAL;
1283         }
1284
1285         dev = rte_cryptodev_pmd_get_dev(dev_id);
1286
1287         if (sess == NULL || xforms == NULL || dev == NULL)
1288                 return -EINVAL;
1289
1290         if (mp->elt_size < sess_priv_sz)
1291                 return -EINVAL;
1292
1293         index = dev->driver_id;
1294         if (index >= sess->nb_drivers)
1295                 return -EINVAL;
1296
1297         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1298
1299         if (sess->sess_data[index].refcnt == 0) {
1300                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1301                                                         sess, mp);
1302                 if (ret < 0) {
1303                         CDEV_LOG_ERR(
1304                                 "dev_id %d failed to configure session details",
1305                                 dev_id);
1306                         return ret;
1307                 }
1308         }
1309
1310         rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1311         sess->sess_data[index].refcnt++;
1312         return 0;
1313 }
1314
1315 int
1316 rte_cryptodev_asym_session_init(uint8_t dev_id,
1317                 struct rte_cryptodev_asym_session *sess,
1318                 struct rte_crypto_asym_xform *xforms,
1319                 struct rte_mempool *mp)
1320 {
1321         struct rte_cryptodev *dev;
1322         uint8_t index;
1323         int ret;
1324
1325         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1326                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1327                 return -EINVAL;
1328         }
1329
1330         dev = rte_cryptodev_pmd_get_dev(dev_id);
1331
1332         if (sess == NULL || xforms == NULL || dev == NULL)
1333                 return -EINVAL;
1334
1335         index = dev->driver_id;
1336
1337         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1338                                 -ENOTSUP);
1339
1340         if (sess->sess_private_data[index] == NULL) {
1341                 ret = dev->dev_ops->asym_session_configure(dev,
1342                                                         xforms,
1343                                                         sess, mp);
1344                 if (ret < 0) {
1345                         CDEV_LOG_ERR(
1346                                 "dev_id %d failed to configure session details",
1347                                 dev_id);
1348                         return ret;
1349                 }
1350         }
1351
1352         rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1353         return 0;
1354 }
1355
1356 struct rte_mempool *
1357 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1358         uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1359         int socket_id)
1360 {
1361         struct rte_mempool *mp;
1362         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1363         uint32_t obj_sz;
1364
1365         obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1366         if (obj_sz > elt_size)
1367                 CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1368                                 obj_sz);
1369         else
1370                 obj_sz = elt_size;
1371
1372         mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1373                         (uint32_t)(sizeof(*pool_priv)),
1374                         NULL, NULL, NULL, NULL,
1375                         socket_id, 0);
1376         if (mp == NULL) {
1377                 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1378                         __func__, name, rte_errno);
1379                 return NULL;
1380         }
1381
1382         pool_priv = rte_mempool_get_priv(mp);
1383         if (!pool_priv) {
1384                 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1385                         __func__, name);
1386                 rte_mempool_free(mp);
1387                 return NULL;
1388         }
1389
1390         pool_priv->nb_drivers = nb_drivers;
1391         pool_priv->user_data_sz = user_data_size;
1392
1393         rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1394                 elt_size, cache_size, user_data_size, mp);
1395         return mp;
1396 }
1397
1398 static unsigned int
1399 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1400 {
1401         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1402                         sess->user_data_sz;
1403 }
1404
1405 struct rte_cryptodev_sym_session *
1406 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1407 {
1408         struct rte_cryptodev_sym_session *sess;
1409         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1410
1411         if (!mp) {
1412                 CDEV_LOG_ERR("Invalid mempool\n");
1413                 return NULL;
1414         }
1415
1416         pool_priv = rte_mempool_get_priv(mp);
1417
1418         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
1419                 CDEV_LOG_ERR("Invalid mempool\n");
1420                 return NULL;
1421         }
1422
1423         /* Allocate a session structure from the session pool */
1424         if (rte_mempool_get(mp, (void **)&sess)) {
1425                 CDEV_LOG_ERR("couldn't get object from session mempool");
1426                 return NULL;
1427         }
1428
1429         sess->nb_drivers = pool_priv->nb_drivers;
1430         sess->user_data_sz = pool_priv->user_data_sz;
1431         sess->opaque_data = 0;
1432
1433         /* Clear device session pointer.
1434          * Include the flag indicating presence of user data
1435          */
1436         memset(sess->sess_data, 0,
1437                         rte_cryptodev_sym_session_data_size(sess));
1438
1439         rte_cryptodev_trace_sym_session_create(mp, sess);
1440         return sess;
1441 }
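/*
 * Usage sketch (illustrative only): creating the header and private-data
 * mempools, allocating a symmetric session and binding it to a device. The
 * AES-CBC transform, pool sizes, key buffer, IV offset and device id 0 are
 * assumptions for the example.
 *
 *        uint8_t key_data[16] = { 0 };
 *        struct rte_mempool *sess_mp, *sess_priv_mp;
 *        struct rte_cryptodev_sym_session *sess;
 *        struct rte_crypto_sym_xform xform = {
 *                .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *                .cipher = {
 *                        .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *                        .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *                        .key = { .data = key_data, .length = 16 },
 *                        .iv = { .offset = 16, .length = 16 },
 *                },
 *        };
 *
 *        sess_mp = rte_cryptodev_sym_session_pool_create("sess_mp", 1024,
 *                        rte_cryptodev_sym_get_header_session_size(), 0, 0,
 *                        rte_socket_id());
 *        sess_priv_mp = rte_mempool_create("sess_priv_mp", 1024,
 *                        rte_cryptodev_sym_get_private_session_size(0),
 *                        0, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 *        sess = rte_cryptodev_sym_session_create(sess_mp);
 *        if (sess == NULL || sess_priv_mp == NULL ||
 *            rte_cryptodev_sym_session_init(0, sess, &xform,
 *                        sess_priv_mp) < 0)
 *                rte_exit(EXIT_FAILURE, "session setup failed\n");
 */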
1442
1443 struct rte_cryptodev_asym_session *
1444 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1445 {
1446         struct rte_cryptodev_asym_session *sess;
1447
1448         /* Allocate a session structure from the session pool */
1449         if (rte_mempool_get(mp, (void **)&sess)) {
1450                 CDEV_LOG_ERR("couldn't get object from session mempool");
1451                 return NULL;
1452         }
1453
1454         /* Clear device session pointer.
1455          * Include the flag indicating presence of private data
1456          */
1457         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1458
1459         rte_cryptodev_trace_asym_session_create(mp, sess);
1460         return sess;
1461 }
1462
1463 int
1464 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1465                 struct rte_cryptodev_sym_session *sess)
1466 {
1467         struct rte_cryptodev *dev;
1468         uint8_t driver_id;
1469
1470         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1471                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1472                 return -EINVAL;
1473         }
1474
1475         dev = rte_cryptodev_pmd_get_dev(dev_id);
1476
1477         if (dev == NULL || sess == NULL)
1478                 return -EINVAL;
1479
1480         driver_id = dev->driver_id;
1481         if (sess->sess_data[driver_id].refcnt == 0)
1482                 return 0;
1483         if (--sess->sess_data[driver_id].refcnt != 0)
1484                 return -EBUSY;
1485
1486         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1487
1488         dev->dev_ops->sym_session_clear(dev, sess);
1489
1490         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1491         return 0;
1492 }
1493
1494 int
1495 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1496                 struct rte_cryptodev_asym_session *sess)
1497 {
1498         struct rte_cryptodev *dev;
1499
1500         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1501                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1502                 return -EINVAL;
1503         }
1504
1505         dev = rte_cryptodev_pmd_get_dev(dev_id);
1506
1507         if (dev == NULL || sess == NULL)
1508                 return -EINVAL;
1509
1510         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1511
1512         dev->dev_ops->asym_session_clear(dev, sess);
1513
1514         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1515         return 0;
1516 }
1517
1518 int
1519 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1520 {
1521         uint8_t i;
1522         struct rte_mempool *sess_mp;
1523
1524         if (sess == NULL)
1525                 return -EINVAL;
1526
1527         /* Check that all device private data has been freed */
1528         for (i = 0; i < sess->nb_drivers; i++) {
1529                 if (sess->sess_data[i].refcnt != 0)
1530                         return -EBUSY;
1531         }
1532
1533         /* Return session to mempool */
1534         sess_mp = rte_mempool_from_obj(sess);
1535         rte_mempool_put(sess_mp, sess);
1536
1537         rte_cryptodev_trace_sym_session_free(sess);
1538         return 0;
1539 }
1540
1541 int
1542 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1543 {
1544         uint8_t i;
1545         void *sess_priv;
1546         struct rte_mempool *sess_mp;
1547
1548         if (sess == NULL)
1549                 return -EINVAL;
1550
1551         /* Check that all device private data has been freed */
1552         for (i = 0; i < nb_drivers; i++) {
1553                 sess_priv = get_asym_session_private_data(sess, i);
1554                 if (sess_priv != NULL)
1555                         return -EBUSY;
1556         }
1557
1558         /* Return session to mempool */
1559         sess_mp = rte_mempool_from_obj(sess);
1560         rte_mempool_put(sess_mp, sess);
1561
1562         rte_cryptodev_trace_asym_session_free(sess);
1563         return 0;
1564 }
1565
1566 unsigned int
1567 rte_cryptodev_sym_get_header_session_size(void)
1568 {
1569         /*
1570          * Header contains pointers to the private data of all registered
1571          * drivers and all the information necessary to safely clear
1572          * or free all sessions.
1573          */
1574         struct rte_cryptodev_sym_session s = {0};
1575
1576         s.nb_drivers = nb_drivers;
1577
1578         return (unsigned int)(sizeof(s) +
1579                         rte_cryptodev_sym_session_data_size(&s));
1580 }
1581
1582 unsigned int
1583 rte_cryptodev_sym_get_existing_header_session_size(
1584                 struct rte_cryptodev_sym_session *sess)
1585 {
1586         if (!sess)
1587                 return 0;
1588         else
1589                 return (unsigned int)(sizeof(*sess) +
1590                                 rte_cryptodev_sym_session_data_size(sess));
1591 }
1592
1593 unsigned int
1594 rte_cryptodev_asym_get_header_session_size(void)
1595 {
1596         /*
1597          * Header contains pointers to the private data
1598          * of all registered drivers, and a flag which
1599          * indicates presence of private data
1600          */
1601         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1602 }
1603
1604 unsigned int
1605 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1606 {
1607         struct rte_cryptodev *dev;
1608         unsigned int priv_sess_size;
1609
1610         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1611                 return 0;
1612
1613         dev = rte_cryptodev_pmd_get_dev(dev_id);
1614
1615         if (*dev->dev_ops->sym_session_get_size == NULL)
1616                 return 0;
1617
1618         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1619
1620         return priv_sess_size;
1621 }
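/*
 * Sizing sketch (illustrative only, nb_devs being the application's own
 * device count): when one private-data mempool serves several devices,
 * its element size is typically the largest value reported by them, e.g.:
 *
 *	unsigned int max_sz = 0, sz;
 *	uint8_t id;
 *
 *	for (id = 0; id < nb_devs; id++) {
 *		sz = rte_cryptodev_sym_get_private_session_size(id);
 *		if (sz > max_sz)
 *			max_sz = sz;
 *	}
 */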
1622
1623 unsigned int
1624 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1625 {
1626         struct rte_cryptodev *dev;
1627         unsigned int header_size = sizeof(void *) * nb_drivers;
1628         unsigned int priv_sess_size;
1629
1630         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1631                 return 0;
1632
1633         dev = rte_cryptodev_pmd_get_dev(dev_id);
1634
1635         if (*dev->dev_ops->asym_session_get_size == NULL)
1636                 return 0;
1637
1638         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1639         if (priv_sess_size < header_size)
1640                 return header_size;
1641
1642         return priv_sess_size;
1643
1644 }
1645
1646 int
1647 rte_cryptodev_sym_session_set_user_data(
1648                                         struct rte_cryptodev_sym_session *sess,
1649                                         void *data,
1650                                         uint16_t size)
1651 {
1652         if (sess == NULL)
1653                 return -EINVAL;
1654
1655         if (sess->user_data_sz < size)
1656                 return -ENOMEM;
1657
1658         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1659         return 0;
1660 }
1661
1662 void *
1663 rte_cryptodev_sym_session_get_user_data(
1664                                         struct rte_cryptodev_sym_session *sess)
1665 {
1666         if (sess == NULL || sess->user_data_sz == 0)
1667                 return NULL;
1668
1669         return (void *)(sess->sess_data + sess->nb_drivers);
1670 }
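/*
 * Usage sketch (illustrative only, struct app_ctx is a hypothetical
 * application type): the user data area is only present when the session
 * mempool was created with a non-zero user data size, in which case a
 * per-session context can be stored and later retrieved, e.g.:
 *
 *	struct app_ctx ctx = { ... };
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	...
 *	struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
 */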
1671
1672 static inline void
1673 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
1674 {
1675         uint32_t i;
1676         for (i = 0; i < vec->num; i++)
1677                 vec->status[i] = errnum;
1678 }
1679
1680 uint32_t
1681 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1682         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1683         struct rte_crypto_sym_vec *vec)
1684 {
1685         struct rte_cryptodev *dev;
1686
1687         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1688                 sym_crypto_fill_status(vec, EINVAL);
1689                 return 0;
1690         }
1691
1692         dev = rte_cryptodev_pmd_get_dev(dev_id);
1693
1694         if (*dev->dev_ops->sym_cpu_process == NULL ||
1695                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
1696                 sym_crypto_fill_status(vec, ENOTSUP);
1697                 return 0;
1698         }
1699
1700         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1701 }
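/*
 * Usage sketch (illustrative only): callers are expected to check the
 * RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO feature flag, fill a struct
 * rte_crypto_sym_vec describing vec.num buffers, and inspect the
 * per-element status afterwards, e.g.:
 *
 *	uint32_t i, ok;
 *
 *	ok = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec);
 *	for (i = 0; i < vec.num; i++)
 *		if (vec.status[i] != 0)
 *			... the i-th element failed ...
 */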
1702
1703 /** Initialise rte_crypto_op mempool element */
1704 static void
1705 rte_crypto_op_init(struct rte_mempool *mempool,
1706                 void *opaque_arg,
1707                 void *_op_data,
1708                 __rte_unused unsigned i)
1709 {
1710         struct rte_crypto_op *op = _op_data;
1711         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1712
1713         memset(_op_data, 0, mempool->elt_size);
1714
1715         __rte_crypto_op_reset(op, type);
1716
1717         op->phys_addr = rte_mem_virt2iova(_op_data);
1718         op->mempool = mempool;
1719 }
1720
1721
1722 struct rte_mempool *
1723 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1724                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1725                 int socket_id)
1726 {
1727         struct rte_crypto_op_pool_private *priv;
1728
1729         unsigned elt_size = sizeof(struct rte_crypto_op) +
1730                         priv_size;
1731
1732         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1733                 elt_size += sizeof(struct rte_crypto_sym_op);
1734         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1735                 elt_size += sizeof(struct rte_crypto_asym_op);
1736         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1737                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1738                                     sizeof(struct rte_crypto_asym_op));
1739         } else {
1740                 CDEV_LOG_ERR("Invalid op_type");
1741                 return NULL;
1742         }
1743
1744         /* lookup mempool in case already allocated */
1745         struct rte_mempool *mp = rte_mempool_lookup(name);
1746
1747         if (mp != NULL) {
1748                 priv = (struct rte_crypto_op_pool_private *)
1749                                 rte_mempool_get_priv(mp);
1750
1751                 if (mp->elt_size != elt_size ||
1752                                 mp->cache_size < cache_size ||
1753                                 mp->size < nb_elts ||
1754                                 priv->priv_size < priv_size) {
1755                         mp = NULL;
1756                         CDEV_LOG_ERR("Mempool %s already exists but with "
1757                                         "incompatible parameters", name);
1758                         return NULL;
1759                 }
1760                 return mp;
1761         }
1762
1763         mp = rte_mempool_create(
1764                         name,
1765                         nb_elts,
1766                         elt_size,
1767                         cache_size,
1768                         sizeof(struct rte_crypto_op_pool_private),
1769                         NULL,
1770                         NULL,
1771                         rte_crypto_op_init,
1772                         &type,
1773                         socket_id,
1774                         0);
1775
1776         if (mp == NULL) {
1777                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1778                 return NULL;
1779         }
1780
1781         priv = (struct rte_crypto_op_pool_private *)
1782                         rte_mempool_get_priv(mp);
1783
1784         priv->priv_size = priv_size;
1785         priv->type = type;
1786
1787         return mp;
1788 }
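/*
 * Usage sketch (illustrative only, pool parameters are arbitrary): a
 * typical application creates one pool of symmetric operations at
 * start-up and allocates ops from it per burst, e.g.:
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */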
1789
1790 int
1791 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1792 {
1793         struct rte_cryptodev *dev = NULL;
1794         uint32_t i = 0;
1795
1796         if (name == NULL)
1797                 return -EINVAL;
1798
1799         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1800                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1801                                 "%s_%u", dev_name_prefix, i);
1802
1803                 if (ret < 0)
1804                         return ret;
1805
1806                 dev = rte_cryptodev_pmd_get_named_dev(name);
1807                 if (!dev)
1808                         return 0;
1809         }
1810
1811         return -1;
1812 }
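/*
 * Usage sketch (illustrative only, "crypto_foo" is a hypothetical driver
 * prefix): PMDs can use this helper during probe to derive a unique
 * device name before allocating the device, e.g.:
 *
 *	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 *
 *	if (rte_cryptodev_pmd_create_dev_name(name, "crypto_foo") == 0)
 *		dev = rte_cryptodev_pmd_allocate(name, socket_id);
 */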
1813
1814 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
1815
1816 static struct cryptodev_driver_list cryptodev_driver_list =
1817         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1818
1819 int
1820 rte_cryptodev_driver_id_get(const char *name)
1821 {
1822         struct cryptodev_driver *driver;
1823         const char *driver_name;
1824
1825         if (name == NULL) {
1826                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
1827                 return -1;
1828         }
1829
1830         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1831                 driver_name = driver->driver->name;
1832                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
1833                         return driver->id;
1834         }
1835         return -1;
1836 }
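/*
 * Usage sketch (illustrative only, the driver name is assumed to be
 * "crypto_aesni_mb"): applications typically resolve a driver id once and
 * reuse it, e.g. to group devices that are backed by the same PMD:
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *	if (drv_id < 0)
 *		... driver not present ...
 */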
1837
1838 const char *
1839 rte_cryptodev_name_get(uint8_t dev_id)
1840 {
1841         struct rte_cryptodev *dev;
1842
1843         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1844                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1845                 return NULL;
1846         }
1847
1848         dev = rte_cryptodev_pmd_get_dev(dev_id);
1849         if (dev == NULL)
1850                 return NULL;
1851
1852         return dev->data->name;
1853 }
1854
1855 const char *
1856 rte_cryptodev_driver_name_get(uint8_t driver_id)
1857 {
1858         struct cryptodev_driver *driver;
1859
1860         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1861                 if (driver->id == driver_id)
1862                         return driver->driver->name;
1863         return NULL;
1864 }
1865
1866 uint8_t
1867 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1868                 const struct rte_driver *drv)
1869 {
1870         crypto_drv->driver = drv;
1871         crypto_drv->id = nb_drivers;
1872
1873         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1874
1875         return nb_drivers++;
1876 }