lib/librte_cryptodev/rte_cryptodev.c (dpdk.git, tree viewed at commit "vhost/crypto: add missing user protocol flag")
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43 #include "rte_cryptodev_trace.h"
44
45 static uint8_t nb_drivers;
46
47 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
48
49 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
50
51 static struct rte_cryptodev_global cryptodev_globals = {
52                 .devs                   = rte_crypto_devices,
53                 .data                   = { NULL },
54                 .nb_devs                = 0
55 };
56
57 /* spinlock for crypto device callbacks */
58 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
59
60
61 /**
62  * The user application callback description.
63  *
64  * It contains the callback address registered by the user application,
65  * the pointer to the callback's parameters, and the event type.
66  */
67 struct rte_cryptodev_callback {
68         TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
69         rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
70         void *cb_arg;                           /**< Parameter for callback */
71         enum rte_cryptodev_event_type event;    /**< Interrupt event type */
72         uint32_t active;                        /**< Callback is executing */
73 };
74
75 /**
76  * The crypto cipher algorithm string identifiers.
77  * They can be used in an application command line.
78  */
79 const char *
80 rte_crypto_cipher_algorithm_strings[] = {
81         [RTE_CRYPTO_CIPHER_3DES_CBC]    = "3des-cbc",
82         [RTE_CRYPTO_CIPHER_3DES_ECB]    = "3des-ecb",
83         [RTE_CRYPTO_CIPHER_3DES_CTR]    = "3des-ctr",
84
85         [RTE_CRYPTO_CIPHER_AES_CBC]     = "aes-cbc",
86         [RTE_CRYPTO_CIPHER_AES_CTR]     = "aes-ctr",
87         [RTE_CRYPTO_CIPHER_AES_DOCSISBPI]       = "aes-docsisbpi",
88         [RTE_CRYPTO_CIPHER_AES_ECB]     = "aes-ecb",
89         [RTE_CRYPTO_CIPHER_AES_F8]      = "aes-f8",
90         [RTE_CRYPTO_CIPHER_AES_XTS]     = "aes-xts",
91
92         [RTE_CRYPTO_CIPHER_ARC4]        = "arc4",
93
94         [RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
95         [RTE_CRYPTO_CIPHER_DES_DOCSISBPI]       = "des-docsisbpi",
96
97         [RTE_CRYPTO_CIPHER_NULL]        = "null",
98
99         [RTE_CRYPTO_CIPHER_KASUMI_F8]   = "kasumi-f8",
100         [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
101         [RTE_CRYPTO_CIPHER_ZUC_EEA3]    = "zuc-eea3"
102 };
103
104 /**
105  * The crypto cipher operation string identifiers.
106  * They can be used in an application command line.
107  */
108 const char *
109 rte_crypto_cipher_operation_strings[] = {
110                 [RTE_CRYPTO_CIPHER_OP_ENCRYPT]  = "encrypt",
111                 [RTE_CRYPTO_CIPHER_OP_DECRYPT]  = "decrypt"
112 };
113
114 /**
115  * The crypto auth algorithm string identifiers.
116  * They can be used in an application command line.
117  */
118 const char *
119 rte_crypto_auth_algorithm_strings[] = {
120         [RTE_CRYPTO_AUTH_AES_CBC_MAC]   = "aes-cbc-mac",
121         [RTE_CRYPTO_AUTH_AES_CMAC]      = "aes-cmac",
122         [RTE_CRYPTO_AUTH_AES_GMAC]      = "aes-gmac",
123         [RTE_CRYPTO_AUTH_AES_XCBC_MAC]  = "aes-xcbc-mac",
124
125         [RTE_CRYPTO_AUTH_MD5]           = "md5",
126         [RTE_CRYPTO_AUTH_MD5_HMAC]      = "md5-hmac",
127
128         [RTE_CRYPTO_AUTH_NULL]          = "null",
129
130         [RTE_CRYPTO_AUTH_SHA1]          = "sha1",
131         [RTE_CRYPTO_AUTH_SHA1_HMAC]     = "sha1-hmac",
132
133         [RTE_CRYPTO_AUTH_SHA224]        = "sha2-224",
134         [RTE_CRYPTO_AUTH_SHA224_HMAC]   = "sha2-224-hmac",
135         [RTE_CRYPTO_AUTH_SHA256]        = "sha2-256",
136         [RTE_CRYPTO_AUTH_SHA256_HMAC]   = "sha2-256-hmac",
137         [RTE_CRYPTO_AUTH_SHA384]        = "sha2-384",
138         [RTE_CRYPTO_AUTH_SHA384_HMAC]   = "sha2-384-hmac",
139         [RTE_CRYPTO_AUTH_SHA512]        = "sha2-512",
140         [RTE_CRYPTO_AUTH_SHA512_HMAC]   = "sha2-512-hmac",
141
142         [RTE_CRYPTO_AUTH_KASUMI_F9]     = "kasumi-f9",
143         [RTE_CRYPTO_AUTH_SNOW3G_UIA2]   = "snow3g-uia2",
144         [RTE_CRYPTO_AUTH_ZUC_EIA3]      = "zuc-eia3"
145 };
146
147 /**
148  * The crypto AEAD algorithm string identifiers.
149  * They can be used in an application command line.
150  */
151 const char *
152 rte_crypto_aead_algorithm_strings[] = {
153         [RTE_CRYPTO_AEAD_AES_CCM]       = "aes-ccm",
154         [RTE_CRYPTO_AEAD_AES_GCM]       = "aes-gcm",
155 };
156
157 /**
158  * The crypto AEAD operation string identifiers.
159  * They can be used in an application command line.
160  */
161 const char *
162 rte_crypto_aead_operation_strings[] = {
163         [RTE_CRYPTO_AEAD_OP_ENCRYPT]    = "encrypt",
164         [RTE_CRYPTO_AEAD_OP_DECRYPT]    = "decrypt"
165 };
166
167 /**
168  * Asymmetric crypto transform operation strings identifiers.
169  */
170 const char *rte_crypto_asym_xform_strings[] = {
171         [RTE_CRYPTO_ASYM_XFORM_NONE]    = "none",
172         [RTE_CRYPTO_ASYM_XFORM_RSA]     = "rsa",
173         [RTE_CRYPTO_ASYM_XFORM_MODEX]   = "modexp",
174         [RTE_CRYPTO_ASYM_XFORM_MODINV]  = "modinv",
175         [RTE_CRYPTO_ASYM_XFORM_DH]      = "dh",
176         [RTE_CRYPTO_ASYM_XFORM_DSA]     = "dsa",
177         [RTE_CRYPTO_ASYM_XFORM_ECDSA]   = "ecdsa",
178         [RTE_CRYPTO_ASYM_XFORM_ECPM]    = "ecpm",
179 };
180
181 /**
182  * Asymmetric crypto operation strings identifiers.
183  */
184 const char *rte_crypto_asym_op_strings[] = {
185         [RTE_CRYPTO_ASYM_OP_ENCRYPT]    = "encrypt",
186         [RTE_CRYPTO_ASYM_OP_DECRYPT]    = "decrypt",
187         [RTE_CRYPTO_ASYM_OP_SIGN]       = "sign",
188         [RTE_CRYPTO_ASYM_OP_VERIFY]     = "verify",
189         [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]       = "priv_key_generate",
190         [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
191         [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
192 };
193
194 /**
195  * The private data structure stored in the session mempool private data.
196  */
197 struct rte_cryptodev_sym_session_pool_private_data {
198         uint16_t nb_drivers;
199         /**< number of elements in sess_data array */
200         uint16_t user_data_sz;
201         /**< session user data will be placed after sess_data */
202 };
203
204 int
205 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
206                 const char *algo_string)
207 {
208         unsigned int i;
209
210         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
211                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
212                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
213                         return 0;
214                 }
215         }
216
217         /* Invalid string */
218         return -1;
219 }
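/*
 * Illustrative usage sketch (not part of the library source): how a
 * command-line parser could map a user-supplied string such as "aes-cbc"
 * onto the enum via the helper above. The option value in "optarg" and
 * the error handling are assumptions made only for this example.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, optarg) < 0) {
 *		printf("unknown cipher algorithm \"%s\"\n", optarg);
 *		return -EINVAL;
 *	}
 *	cipher_xform.cipher.algo = algo;
 */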
220
221 int
222 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
223                 const char *algo_string)
224 {
225         unsigned int i;
226
227         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
228                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
229                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
230                         return 0;
231                 }
232         }
233
234         /* Invalid string */
235         return -1;
236 }
237
238 int
239 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
240                 const char *algo_string)
241 {
242         unsigned int i;
243
244         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
245                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
246                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
247                         return 0;
248                 }
249         }
250
251         /* Invalid string */
252         return -1;
253 }
254
255 int
256 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
257                 const char *xform_string)
258 {
259         unsigned int i;
260
261         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
262                 if (strcmp(xform_string,
263                         rte_crypto_asym_xform_strings[i]) == 0) {
264                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
265                         return 0;
266                 }
267         }
268
269         /* Invalid string */
270         return -1;
271 }
272
273 /**
274  * The crypto auth operation string identifiers.
275  * They can be used in an application command line.
276  */
277 const char *
278 rte_crypto_auth_operation_strings[] = {
279                 [RTE_CRYPTO_AUTH_OP_VERIFY]     = "verify",
280                 [RTE_CRYPTO_AUTH_OP_GENERATE]   = "generate"
281 };
282
283 const struct rte_cryptodev_symmetric_capability *
284 rte_cryptodev_sym_capability_get(uint8_t dev_id,
285                 const struct rte_cryptodev_sym_capability_idx *idx)
286 {
287         const struct rte_cryptodev_capabilities *capability;
288         struct rte_cryptodev_info dev_info;
289         int i = 0;
290
291         rte_cryptodev_info_get(dev_id, &dev_info);
292
293         while ((capability = &dev_info.capabilities[i++])->op !=
294                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
295                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
296                         continue;
297
298                 if (capability->sym.xform_type != idx->type)
299                         continue;
300
301                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
302                         capability->sym.auth.algo == idx->algo.auth)
303                         return &capability->sym;
304
305                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
306                         capability->sym.cipher.algo == idx->algo.cipher)
307                         return &capability->sym;
308
309                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
310                                 capability->sym.aead.algo == idx->algo.aead)
311                         return &capability->sym;
312         }
313
314         return NULL;
315
316 }
317
318 static int
319 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
320 {
321         unsigned int next_size;
322
323         /* Check lower/upper bounds */
324         if (size < range->min)
325                 return -1;
326
327         if (size > range->max)
328                 return -1;
329
330         /* If range is actually only one value, size is correct */
331         if (range->increment == 0)
332                 return 0;
333
334         /* Check if value is one of the supported sizes */
335         for (next_size = range->min; next_size <= range->max;
336                         next_size += range->increment)
337                 if (size == next_size)
338                         return 0;
339
340         return -1;
341 }
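/*
 * Worked example of the range semantics checked above (values chosen only
 * for illustration): with { .min = 16, .max = 32, .increment = 8 }, sizes
 * 16, 24 and 32 are accepted and everything else is rejected; with
 * { .min = 16, .max = 16, .increment = 0 } only 16 is accepted.
 */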
342
343 const struct rte_cryptodev_asymmetric_xform_capability *
344 rte_cryptodev_asym_capability_get(uint8_t dev_id,
345                 const struct rte_cryptodev_asym_capability_idx *idx)
346 {
347         const struct rte_cryptodev_capabilities *capability;
348         struct rte_cryptodev_info dev_info;
349         unsigned int i = 0;
350
351         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
352         rte_cryptodev_info_get(dev_id, &dev_info);
353
354         while ((capability = &dev_info.capabilities[i++])->op !=
355                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
356                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
357                         continue;
358
359                 if (capability->asym.xform_capa.xform_type == idx->type)
360                         return &capability->asym.xform_capa;
361         }
362         return NULL;
363 };
364
365 int
366 rte_cryptodev_sym_capability_check_cipher(
367                 const struct rte_cryptodev_symmetric_capability *capability,
368                 uint16_t key_size, uint16_t iv_size)
369 {
370         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
371                 return -1;
372
373         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
374                 return -1;
375
376         return 0;
377 }
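/*
 * Illustrative sketch (algorithm and sizes chosen only for the example):
 * the usual flow is to look up the symmetric capability for a given
 * transform/algorithm pair and then validate the intended parameters
 * against the advertised ranges before building the session.
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		return -ENOTSUP;	// 16-byte key and IV not supported
 */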
378
379 int
380 rte_cryptodev_sym_capability_check_auth(
381                 const struct rte_cryptodev_symmetric_capability *capability,
382                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
383 {
384         if (param_range_check(key_size, &capability->auth.key_size) != 0)
385                 return -1;
386
387         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
388                 return -1;
389
390         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
391                 return -1;
392
393         return 0;
394 }
395
396 int
397 rte_cryptodev_sym_capability_check_aead(
398                 const struct rte_cryptodev_symmetric_capability *capability,
399                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
400                 uint16_t iv_size)
401 {
402         if (param_range_check(key_size, &capability->aead.key_size) != 0)
403                 return -1;
404
405         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
406                 return -1;
407
408         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
409                 return -1;
410
411         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
412                 return -1;
413
414         return 0;
415 }
416 int
417 rte_cryptodev_asym_xform_capability_check_optype(
418         const struct rte_cryptodev_asymmetric_xform_capability *capability,
419         enum rte_crypto_asym_op_type op_type)
420 {
421         if (capability->op_types & (1 << op_type))
422                 return 1;
423
424         return 0;
425 }
426
427 int
428 rte_cryptodev_asym_xform_capability_check_modlen(
429         const struct rte_cryptodev_asymmetric_xform_capability *capability,
430         uint16_t modlen)
431 {
432         /* no need to check the limits if min or max is 0 */
433         if (capability->modlen.min != 0) {
434                 if (modlen < capability->modlen.min)
435                         return -1;
436         }
437
438         if (capability->modlen.max != 0) {
439                 if (modlen > capability->modlen.max)
440                         return -1;
441         }
442
443         /* in any case, check if modlen is a multiple of the increment */
444         if (capability->modlen.increment != 0) {
445                 if (modlen % (capability->modlen.increment))
446                         return -1;
447         }
448
449         return 0;
450 }
451
452
453 const char *
454 rte_cryptodev_get_feature_name(uint64_t flag)
455 {
456         switch (flag) {
457         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
458                 return "SYMMETRIC_CRYPTO";
459         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
460                 return "ASYMMETRIC_CRYPTO";
461         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
462                 return "SYM_OPERATION_CHAINING";
463         case RTE_CRYPTODEV_FF_CPU_SSE:
464                 return "CPU_SSE";
465         case RTE_CRYPTODEV_FF_CPU_AVX:
466                 return "CPU_AVX";
467         case RTE_CRYPTODEV_FF_CPU_AVX2:
468                 return "CPU_AVX2";
469         case RTE_CRYPTODEV_FF_CPU_AVX512:
470                 return "CPU_AVX512";
471         case RTE_CRYPTODEV_FF_CPU_AESNI:
472                 return "CPU_AESNI";
473         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
474                 return "HW_ACCELERATED";
475         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
476                 return "IN_PLACE_SGL";
477         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
478                 return "OOP_SGL_IN_SGL_OUT";
479         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
480                 return "OOP_SGL_IN_LB_OUT";
481         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
482                 return "OOP_LB_IN_SGL_OUT";
483         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
484                 return "OOP_LB_IN_LB_OUT";
485         case RTE_CRYPTODEV_FF_CPU_NEON:
486                 return "CPU_NEON";
487         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
488                 return "CPU_ARM_CE";
489         case RTE_CRYPTODEV_FF_SECURITY:
490                 return "SECURITY_PROTOCOL";
491         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
492                 return "RSA_PRIV_OP_KEY_EXP";
493         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
494                 return "RSA_PRIV_OP_KEY_QT";
495         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
496                 return "DIGEST_ENCRYPTED";
497         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
498                 return "SYM_CPU_CRYPTO";
499         case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
500                 return "ASYM_SESSIONLESS";
501         case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
502                 return "SYM_SESSIONLESS";
503         default:
504                 return NULL;
505         }
506 }
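/*
 * Illustrative sketch (not part of the library source): printing every
 * feature name a device advertises, one bit at a time. dev_info is
 * assumed to have been filled in by rte_cryptodev_info_get().
 *
 *	uint64_t flags = dev_info.feature_flags;
 *	uint64_t bit;
 *
 *	for (bit = 1; bit != 0; bit <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(bit);
 *
 *		if ((flags & bit) && name != NULL)
 *			printf("  %s\n", name);
 *	}
 */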
507
508 struct rte_cryptodev *
509 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
510 {
511         return &cryptodev_globals.devs[dev_id];
512 }
513
514 struct rte_cryptodev *
515 rte_cryptodev_pmd_get_named_dev(const char *name)
516 {
517         struct rte_cryptodev *dev;
518         unsigned int i;
519
520         if (name == NULL)
521                 return NULL;
522
523         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
524                 dev = &cryptodev_globals.devs[i];
525
526                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
527                                 (strcmp(dev->data->name, name) == 0))
528                         return dev;
529         }
530
531         return NULL;
532 }
533
534 static inline uint8_t
535 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
536 {
537         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
538                         rte_crypto_devices[dev_id].data == NULL)
539                 return 0;
540
541         return 1;
542 }
543
544 unsigned int
545 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
546 {
547         struct rte_cryptodev *dev = NULL;
548
549         if (!rte_cryptodev_is_valid_device_data(dev_id))
550                 return 0;
551
552         dev = rte_cryptodev_pmd_get_dev(dev_id);
553         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
554                 return 0;
555         else
556                 return 1;
557 }
558
559
560 int
561 rte_cryptodev_get_dev_id(const char *name)
562 {
563         unsigned i;
564
565         if (name == NULL)
566                 return -1;
567
568         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
569                 if (!rte_cryptodev_is_valid_device_data(i))
570                         continue;
571                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
572                                 == 0) &&
573                                 (cryptodev_globals.devs[i].attached ==
574                                                 RTE_CRYPTODEV_ATTACHED))
575                         return i;
576         }
577
578         return -1;
579 }
580
581 uint8_t
582 rte_cryptodev_count(void)
583 {
584         return cryptodev_globals.nb_devs;
585 }
586
587 uint8_t
588 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
589 {
590         uint8_t i, dev_count = 0;
591
592         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
593                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
594                         cryptodev_globals.devs[i].attached ==
595                                         RTE_CRYPTODEV_ATTACHED)
596                         dev_count++;
597
598         return dev_count;
599 }
600
601 uint8_t
602 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
603         uint8_t nb_devices)
604 {
605         uint8_t i, count = 0;
606         struct rte_cryptodev *devs = cryptodev_globals.devs;
607
608         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
609                 if (!rte_cryptodev_is_valid_device_data(i))
610                         continue;
611
612                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
613                         int cmp;
614
615                         cmp = strncmp(devs[i].device->driver->name,
616                                         driver_name,
617                                         strlen(driver_name) + 1);
618
619                         if (cmp == 0)
620                                 devices[count++] = devs[i].data->dev_id;
621                 }
622         }
623
624         return count;
625 }
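/*
 * Illustrative sketch (the driver name is only an example): collecting
 * the ids of every attached device bound to a given driver.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u is bound to crypto_aesni_mb\n", ids[i]);
 */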
626
627 void *
628 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
629 {
630         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
631                         (rte_crypto_devices[dev_id].feature_flags &
632                         RTE_CRYPTODEV_FF_SECURITY))
633                 return rte_crypto_devices[dev_id].security_ctx;
634
635         return NULL;
636 }
637
638 int
639 rte_cryptodev_socket_id(uint8_t dev_id)
640 {
641         struct rte_cryptodev *dev;
642
643         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
644                 return -1;
645
646         dev = rte_cryptodev_pmd_get_dev(dev_id);
647
648         return dev->data->socket_id;
649 }
650
651 static inline int
652 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
653                 int socket_id)
654 {
655         char mz_name[RTE_MEMZONE_NAMESIZE];
656         const struct rte_memzone *mz;
657         int n;
658
659         /* generate memzone name */
660         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
661         if (n >= (int)sizeof(mz_name))
662                 return -EINVAL;
663
664         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
665                 mz = rte_memzone_reserve(mz_name,
666                                 sizeof(struct rte_cryptodev_data),
667                                 socket_id, 0);
668         } else
669                 mz = rte_memzone_lookup(mz_name);
670
671         if (mz == NULL)
672                 return -ENOMEM;
673
674         *data = mz->addr;
675         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
676                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
677
678         return 0;
679 }
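/*
 * Note on the multi-process design: the device data lives in a named
 * memzone so that secondary processes can attach to the same
 * rte_cryptodev_data by looking the zone up by name, while only the
 * primary process reserves and zeroes it. That is why the helper above
 * branches on rte_eal_process_type().
 */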
680
681 static inline int
682 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
683 {
684         char mz_name[RTE_MEMZONE_NAMESIZE];
685         const struct rte_memzone *mz;
686         int n;
687
688         /* generate memzone name */
689         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
690         if (n >= (int)sizeof(mz_name))
691                 return -EINVAL;
692
693         mz = rte_memzone_lookup(mz_name);
694         if (mz == NULL)
695                 return -ENOMEM;
696
697         RTE_ASSERT(*data == mz->addr);
698         *data = NULL;
699
700         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
701                 return rte_memzone_free(mz);
702
703         return 0;
704 }
705
706 static uint8_t
707 rte_cryptodev_find_free_device_index(void)
708 {
709         uint8_t dev_id;
710
711         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
712                 if (rte_crypto_devices[dev_id].attached ==
713                                 RTE_CRYPTODEV_DETACHED)
714                         return dev_id;
715         }
716         return RTE_CRYPTO_MAX_DEVS;
717 }
718
719 struct rte_cryptodev *
720 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
721 {
722         struct rte_cryptodev *cryptodev;
723         uint8_t dev_id;
724
725         if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
726                 CDEV_LOG_ERR("Crypto device with name %s already "
727                                 "allocated!", name);
728                 return NULL;
729         }
730
731         dev_id = rte_cryptodev_find_free_device_index();
732         if (dev_id == RTE_CRYPTO_MAX_DEVS) {
733                 CDEV_LOG_ERR("Reached maximum number of crypto devices");
734                 return NULL;
735         }
736
737         cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
738
739         if (cryptodev->data == NULL) {
740                 struct rte_cryptodev_data **cryptodev_data =
741                                 &cryptodev_globals.data[dev_id];
742
743                 int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
744                                 socket_id);
745
746                 if (retval < 0 || *cryptodev_data == NULL)
747                         return NULL;
748
749                 cryptodev->data = *cryptodev_data;
750
751                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
752                         strlcpy(cryptodev->data->name, name,
753                                 RTE_CRYPTODEV_NAME_MAX_LEN);
754
755                         cryptodev->data->dev_id = dev_id;
756                         cryptodev->data->socket_id = socket_id;
757                         cryptodev->data->dev_started = 0;
758                 }
759
760                 /* init user callbacks */
761                 TAILQ_INIT(&(cryptodev->link_intr_cbs));
762
763                 cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
764
765                 cryptodev_globals.nb_devs++;
766         }
767
768         return cryptodev;
769 }
770
771 int
772 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
773 {
774         int ret;
775         uint8_t dev_id;
776
777         if (cryptodev == NULL)
778                 return -EINVAL;
779
780         dev_id = cryptodev->data->dev_id;
781
782         /* Close device only if device operations have been set */
783         if (cryptodev->dev_ops) {
784                 ret = rte_cryptodev_close(dev_id);
785                 if (ret < 0)
786                         return ret;
787         }
788
789         ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
790         if (ret < 0)
791                 return ret;
792
793         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
794         cryptodev_globals.nb_devs--;
795         return 0;
796 }
797
798 uint16_t
799 rte_cryptodev_queue_pair_count(uint8_t dev_id)
800 {
801         struct rte_cryptodev *dev;
802
803         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
804                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
805                 return 0;
806         }
807
808         dev = &rte_crypto_devices[dev_id];
809         return dev->data->nb_queue_pairs;
810 }
811
812 static int
813 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
814                 int socket_id)
815 {
816         struct rte_cryptodev_info dev_info;
817         void **qp;
818         unsigned i;
819
820         if ((dev == NULL) || (nb_qpairs < 1)) {
821                 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
822                                                         dev, nb_qpairs);
823                 return -EINVAL;
824         }
825
826         CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
827                         nb_qpairs, dev->data->dev_id);
828
829         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
830
831         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
832         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
833
834         if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
835                 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
836                                 nb_qpairs, dev->data->dev_id);
837                 return -EINVAL;
838         }
839
840         if (dev->data->queue_pairs == NULL) { /* first time configuration */
841                 dev->data->queue_pairs = rte_zmalloc_socket(
842                                 "cryptodev->queue_pairs",
843                                 sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
844                                 RTE_CACHE_LINE_SIZE, socket_id);
845
846                 if (dev->data->queue_pairs == NULL) {
847                         dev->data->nb_queue_pairs = 0;
848                         CDEV_LOG_ERR("failed to get memory for qp meta data, "
849                                                         "nb_queues %u",
850                                                         nb_qpairs);
851                         return -(ENOMEM);
852                 }
853         } else { /* re-configure */
854                 int ret;
855                 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
856
857                 qp = dev->data->queue_pairs;
858
859                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
860                                 -ENOTSUP);
861
862                 for (i = nb_qpairs; i < old_nb_queues; i++) {
863                         ret = (*dev->dev_ops->queue_pair_release)(dev, i);
864                         if (ret < 0)
865                                 return ret;
866                 }
867
868                 qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
869                                 RTE_CACHE_LINE_SIZE);
870                 if (qp == NULL) {
871                         CDEV_LOG_ERR("failed to realloc qp meta data,"
872                                                 " nb_queues %u", nb_qpairs);
873                         return -(ENOMEM);
874                 }
875
876                 if (nb_qpairs > old_nb_queues) {
877                         uint16_t new_qs = nb_qpairs - old_nb_queues;
878
879                         memset(qp + old_nb_queues, 0,
880                                 sizeof(qp[0]) * new_qs);
881                 }
882
883                 dev->data->queue_pairs = qp;
884
885         }
886         dev->data->nb_queue_pairs = nb_qpairs;
887         return 0;
888 }
889
890 int
891 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
892 {
893         struct rte_cryptodev *dev;
894         int diag;
895
896         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
897                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
898                 return -EINVAL;
899         }
900
901         dev = &rte_crypto_devices[dev_id];
902
903         if (dev->data->dev_started) {
904                 CDEV_LOG_ERR(
905                     "device %d must be stopped to allow configuration", dev_id);
906                 return -EBUSY;
907         }
908
909         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
910
911         /* Setup new number of queue pairs and reconfigure device. */
912         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
913                         config->socket_id);
914         if (diag != 0) {
915                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
916                                 dev_id, diag);
917                 return diag;
918         }
919
920         rte_cryptodev_trace_configure(dev_id, config);
921         return (*dev->dev_ops->dev_configure)(dev, config);
922 }
923
924
925 int
926 rte_cryptodev_start(uint8_t dev_id)
927 {
928         struct rte_cryptodev *dev;
929         int diag;
930
931         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
932
933         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
934                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
935                 return -EINVAL;
936         }
937
938         dev = &rte_crypto_devices[dev_id];
939
940         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
941
942         if (dev->data->dev_started != 0) {
943                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
944                         dev_id);
945                 return 0;
946         }
947
948         diag = (*dev->dev_ops->dev_start)(dev);
949         rte_cryptodev_trace_start(dev_id, diag);
950         if (diag == 0)
951                 dev->data->dev_started = 1;
952         else
953                 return diag;
954
955         return 0;
956 }
957
958 void
959 rte_cryptodev_stop(uint8_t dev_id)
960 {
961         struct rte_cryptodev *dev;
962
963         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
964                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
965                 return;
966         }
967
968         dev = &rte_crypto_devices[dev_id];
969
970         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
971
972         if (dev->data->dev_started == 0) {
973                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
974                         dev_id);
975                 return;
976         }
977
978         (*dev->dev_ops->dev_stop)(dev);
979         rte_cryptodev_trace_stop(dev_id);
980         dev->data->dev_started = 0;
981 }
982
983 int
984 rte_cryptodev_close(uint8_t dev_id)
985 {
986         struct rte_cryptodev *dev;
987         int retval;
988
989         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
990                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
991                 return -1;
992         }
993
994         dev = &rte_crypto_devices[dev_id];
995
996         /* Device must be stopped before it can be closed */
997         if (dev->data->dev_started == 1) {
998                 CDEV_LOG_ERR("Device %u must be stopped before closing",
999                                 dev_id);
1000                 return -EBUSY;
1001         }
1002
1003         /* We can't close the device if there are outstanding sessions in use */
1004         if (dev->data->session_pool != NULL) {
1005                 if (!rte_mempool_full(dev->data->session_pool)) {
1006                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1007                                         "has sessions still in use, free "
1008                                         "all sessions before calling close",
1009                                         (unsigned)dev_id);
1010                         return -EBUSY;
1011                 }
1012         }
1013
1014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1015         retval = (*dev->dev_ops->dev_close)(dev);
1016         rte_cryptodev_trace_close(dev_id, retval);
1017
1018         if (retval < 0)
1019                 return retval;
1020
1021         return 0;
1022 }
1023
1024 int
1025 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1026                 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1027
1028 {
1029         struct rte_cryptodev *dev;
1030
1031         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1032                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1033                 return -EINVAL;
1034         }
1035
1036         dev = &rte_crypto_devices[dev_id];
1037         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1038                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1039                 return -EINVAL;
1040         }
1041
1042         if (!qp_conf) {
1043                 CDEV_LOG_ERR("qp_conf cannot be NULL\n");
1044                 return -EINVAL;
1045         }
1046
1047         if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1048                         (!qp_conf->mp_session && qp_conf->mp_session_private)) {
1049                 CDEV_LOG_ERR("Invalid mempools\n");
1050                 return -EINVAL;
1051         }
1052
1053         if (qp_conf->mp_session) {
1054                 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1055                 uint32_t obj_size = qp_conf->mp_session->elt_size;
1056                 uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1057                 struct rte_cryptodev_sym_session s = {0};
1058
1059                 pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1060                 if (!pool_priv || qp_conf->mp_session->private_data_size <
1061                                 sizeof(*pool_priv)) {
1062                         CDEV_LOG_ERR("Invalid mempool\n");
1063                         return -EINVAL;
1064                 }
1065
1066                 s.nb_drivers = pool_priv->nb_drivers;
1067                 s.user_data_sz = pool_priv->user_data_sz;
1068
1069                 if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1070                         obj_size) || (s.nb_drivers <= dev->driver_id) ||
1071                         rte_cryptodev_sym_get_private_session_size(dev_id) >
1072                                 obj_priv_size) {
1073                         CDEV_LOG_ERR("Invalid mempool\n");
1074                         return -EINVAL;
1075                 }
1076         }
1077
1078         if (dev->data->dev_started) {
1079                 CDEV_LOG_ERR(
1080                     "device %d must be stopped to allow configuration", dev_id);
1081                 return -EBUSY;
1082         }
1083
1084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1085
1086         rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1087         return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1088                         socket_id);
1089 }
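/*
 * Illustrative bring-up sketch (the mempool variables and sizes are
 * assumptions made for the example): a device is configured while
 * stopped, its queue pairs are set up, and only then is it started.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,			// header mempool
 *		.mp_session_private = sess_priv_mp,	// per-driver mempool
 *	};
 *	uint16_t qp;
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 *	for (qp = 0; qp < conf.nb_queue_pairs; qp++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp, &qp_conf,
 *				conf.socket_id) < 0)
 *			return -1;
 *	if (rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */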
1090
1091
1092 int
1093 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1094 {
1095         struct rte_cryptodev *dev;
1096
1097         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1098                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1099                 return -ENODEV;
1100         }
1101
1102         if (stats == NULL) {
1103                 CDEV_LOG_ERR("Invalid stats ptr");
1104                 return -EINVAL;
1105         }
1106
1107         dev = &rte_crypto_devices[dev_id];
1108         memset(stats, 0, sizeof(*stats));
1109
1110         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1111         (*dev->dev_ops->stats_get)(dev, stats);
1112         return 0;
1113 }
1114
1115 void
1116 rte_cryptodev_stats_reset(uint8_t dev_id)
1117 {
1118         struct rte_cryptodev *dev;
1119
1120         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1121                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1122                 return;
1123         }
1124
1125         dev = &rte_crypto_devices[dev_id];
1126
1127         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1128         (*dev->dev_ops->stats_reset)(dev);
1129 }
1130
1131
1132 void
1133 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1134 {
1135         struct rte_cryptodev *dev;
1136
1137         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1138                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1139                 return;
1140         }
1141
1142         dev = &rte_crypto_devices[dev_id];
1143
1144         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1145
1146         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1147         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1148
1149         dev_info->driver_name = dev->device->driver->name;
1150         dev_info->device = dev->device;
1151 }
1152
1153
1154 int
1155 rte_cryptodev_callback_register(uint8_t dev_id,
1156                         enum rte_cryptodev_event_type event,
1157                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1158 {
1159         struct rte_cryptodev *dev;
1160         struct rte_cryptodev_callback *user_cb;
1161
1162         if (!cb_fn)
1163                 return -EINVAL;
1164
1165         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1166                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1167                 return -EINVAL;
1168         }
1169
1170         dev = &rte_crypto_devices[dev_id];
1171         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1172
1173         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1174                 if (user_cb->cb_fn == cb_fn &&
1175                         user_cb->cb_arg == cb_arg &&
1176                         user_cb->event == event) {
1177                         break;
1178                 }
1179         }
1180
1181         /* create a new callback. */
1182         if (user_cb == NULL) {
1183                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1184                                 sizeof(struct rte_cryptodev_callback), 0);
1185                 if (user_cb != NULL) {
1186                         user_cb->cb_fn = cb_fn;
1187                         user_cb->cb_arg = cb_arg;
1188                         user_cb->event = event;
1189                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1190                 }
1191         }
1192
1193         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1194         return (user_cb == NULL) ? -ENOMEM : 0;
1195 }
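/*
 * Illustrative sketch (the callback body and its argument are assumptions
 * for the example): registering a handler that will be invoked from
 * rte_cryptodev_pmd_callback_process() when a PMD raises an event.
 *
 *	static void
 *	event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			__rte_unused void *arg)
 *	{
 *		printf("crypto dev %u raised event %d\n", dev_id, event);
 *	}
 *
 *	...
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			event_cb, NULL);
 */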
1196
1197 int
1198 rte_cryptodev_callback_unregister(uint8_t dev_id,
1199                         enum rte_cryptodev_event_type event,
1200                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1201 {
1202         int ret;
1203         struct rte_cryptodev *dev;
1204         struct rte_cryptodev_callback *cb, *next;
1205
1206         if (!cb_fn)
1207                 return -EINVAL;
1208
1209         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1210                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1211                 return -EINVAL;
1212         }
1213
1214         dev = &rte_crypto_devices[dev_id];
1215         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1216
1217         ret = 0;
1218         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1219
1220                 next = TAILQ_NEXT(cb, next);
1221
1222                 if (cb->cb_fn != cb_fn || cb->event != event ||
1223                                 (cb->cb_arg != (void *)-1 &&
1224                                 cb->cb_arg != cb_arg))
1225                         continue;
1226
1227                 /*
1228                  * if this callback is not executing right now,
1229                  * then remove it.
1230                  */
1231                 if (cb->active == 0) {
1232                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1233                         rte_free(cb);
1234                 } else {
1235                         ret = -EAGAIN;
1236                 }
1237         }
1238
1239         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1240         return ret;
1241 }
1242
1243 void
1244 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1245         enum rte_cryptodev_event_type event)
1246 {
1247         struct rte_cryptodev_callback *cb_lst;
1248         struct rte_cryptodev_callback dev_cb;
1249
1250         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1251         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1252                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1253                         continue;
1254                 dev_cb = *cb_lst;
1255                 cb_lst->active = 1;
1256                 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1257                 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1258                                                 dev_cb.cb_arg);
1259                 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1260                 cb_lst->active = 0;
1261         }
1262         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1263 }
1264
1265
1266 int
1267 rte_cryptodev_sym_session_init(uint8_t dev_id,
1268                 struct rte_cryptodev_sym_session *sess,
1269                 struct rte_crypto_sym_xform *xforms,
1270                 struct rte_mempool *mp)
1271 {
1272         struct rte_cryptodev *dev;
1273         uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1274                         dev_id);
1275         uint8_t index;
1276         int ret;
1277
1278         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1279                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1280                 return -EINVAL;
1281         }
1282
1283         dev = rte_cryptodev_pmd_get_dev(dev_id);
1284
1285         if (sess == NULL || xforms == NULL || dev == NULL)
1286                 return -EINVAL;
1287
1288         if (mp->elt_size < sess_priv_sz)
1289                 return -EINVAL;
1290
1291         index = dev->driver_id;
1292         if (index >= sess->nb_drivers)
1293                 return -EINVAL;
1294
1295         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1296
1297         if (sess->sess_data[index].refcnt == 0) {
1298                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1299                                                         sess, mp);
1300                 if (ret < 0) {
1301                         CDEV_LOG_ERR(
1302                                 "dev_id %d failed to configure session details",
1303                                 dev_id);
1304                         return ret;
1305                 }
1306         }
1307
1308         rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1309         sess->sess_data[index].refcnt++;
1310         return 0;
1311 }
1312
1313 int
1314 rte_cryptodev_asym_session_init(uint8_t dev_id,
1315                 struct rte_cryptodev_asym_session *sess,
1316                 struct rte_crypto_asym_xform *xforms,
1317                 struct rte_mempool *mp)
1318 {
1319         struct rte_cryptodev *dev;
1320         uint8_t index;
1321         int ret;
1322
1323         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1324                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1325                 return -EINVAL;
1326         }
1327
1328         dev = rte_cryptodev_pmd_get_dev(dev_id);
1329
1330         if (sess == NULL || xforms == NULL || dev == NULL)
1331                 return -EINVAL;
1332
1333         index = dev->driver_id;
1334
1335         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1336                                 -ENOTSUP);
1337
1338         if (sess->sess_private_data[index] == NULL) {
1339                 ret = dev->dev_ops->asym_session_configure(dev,
1340                                                         xforms,
1341                                                         sess, mp);
1342                 if (ret < 0) {
1343                         CDEV_LOG_ERR(
1344                                 "dev_id %d failed to configure session details",
1345                                 dev_id);
1346                         return ret;
1347                 }
1348         }
1349
1350         rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1351         return 0;
1352 }
1353
1354 struct rte_mempool *
1355 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1356         uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1357         int socket_id)
1358 {
1359         struct rte_mempool *mp;
1360         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1361         uint32_t obj_sz;
1362
1363         obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1364         if (obj_sz > elt_size)
1365                 CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1366                                 obj_sz);
1367         else
1368                 obj_sz = elt_size;
1369
1370         mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1371                         (uint32_t)(sizeof(*pool_priv)),
1372                         NULL, NULL, NULL, NULL,
1373                         socket_id, 0);
1374         if (mp == NULL) {
1375                 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1376                         __func__, name, rte_errno);
1377                 return NULL;
1378         }
1379
1380         pool_priv = rte_mempool_get_priv(mp);
1381         if (!pool_priv) {
1382                 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1383                         __func__, name);
1384                 rte_mempool_free(mp);
1385                 return NULL;
1386         }
1387
1388         pool_priv->nb_drivers = nb_drivers;
1389         pool_priv->user_data_sz = user_data_size;
1390
1391         rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1392                 elt_size, cache_size, user_data_size, mp);
1393         return mp;
1394 }
1395
1396 static unsigned int
1397 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1398 {
1399         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1400                         sess->user_data_sz;
1401 }
1402
1403 struct rte_cryptodev_sym_session *
1404 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1405 {
1406         struct rte_cryptodev_sym_session *sess;
1407         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1408
1409         if (!mp) {
1410                 CDEV_LOG_ERR("Invalid mempool\n");
1411                 return NULL;
1412         }
1413
1414         pool_priv = rte_mempool_get_priv(mp);
1415
1416         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
1417                 CDEV_LOG_ERR("Invalid mempool\n");
1418                 return NULL;
1419         }
1420
1421         /* Allocate a session structure from the session pool */
1422         if (rte_mempool_get(mp, (void **)&sess)) {
1423                 CDEV_LOG_ERR("couldn't get object from session mempool");
1424                 return NULL;
1425         }
1426
1427         sess->nb_drivers = pool_priv->nb_drivers;
1428         sess->user_data_sz = pool_priv->user_data_sz;
1429         sess->opaque_data = 0;
1430
1431         /* Clear the per-driver session data and the user data area
1432          * that follows it.
1433          */
1434         memset(sess->sess_data, 0,
1435                         rte_cryptodev_sym_session_data_size(sess));
1436
1437         rte_cryptodev_trace_sym_session_create(mp, sess);
1438         return sess;
1439 }
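/*
 * Illustrative lifecycle sketch (the mempool variables and the xform are
 * assumptions for the example): a symmetric session is allocated from a
 * header mempool created with rte_cryptodev_sym_session_pool_create(),
 * initialised per device with a private-data mempool, and torn down in
 * the reverse order.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL)
 *		return -ENOMEM;
 *	if (rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
 *			sess_priv_mp) < 0) {
 *		rte_cryptodev_sym_session_free(sess);
 *		return -EINVAL;
 *	}
 *	// ... enqueue/dequeue crypto ops that reference sess ...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */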
1440
1441 struct rte_cryptodev_asym_session *
1442 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1443 {
1444         struct rte_cryptodev_asym_session *sess;
1445
1446         /* Allocate a session structure from the session pool */
1447         if (rte_mempool_get(mp, (void **)&sess)) {
1448                 CDEV_LOG_ERR("couldn't get object from session mempool");
1449                 return NULL;
1450         }
1451
1452         /* Clear the per-driver private data pointers and the flag
1453          * that indicates presence of private data.
1454          */
1455         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1456
1457         rte_cryptodev_trace_asym_session_create(mp, sess);
1458         return sess;
1459 }
1460
1461 int
1462 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1463                 struct rte_cryptodev_sym_session *sess)
1464 {
1465         struct rte_cryptodev *dev;
1466         uint8_t driver_id;
1467
1468         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1469                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1470                 return -EINVAL;
1471         }
1472
1473         dev = rte_cryptodev_pmd_get_dev(dev_id);
1474
1475         if (dev == NULL || sess == NULL)
1476                 return -EINVAL;
1477
1478         driver_id = dev->driver_id;
1479         if (sess->sess_data[driver_id].refcnt == 0)
1480                 return 0;
1481         if (--sess->sess_data[driver_id].refcnt != 0)
1482                 return -EBUSY;
1483
1484         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1485
1486         dev->dev_ops->sym_session_clear(dev, sess);
1487
1488         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1489         return 0;
1490 }
1491
1492 int
1493 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1494                 struct rte_cryptodev_asym_session *sess)
1495 {
1496         struct rte_cryptodev *dev;
1497
1498         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1499                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1500                 return -EINVAL;
1501         }
1502
1503         dev = rte_cryptodev_pmd_get_dev(dev_id);
1504
1505         if (dev == NULL || sess == NULL)
1506                 return -EINVAL;
1507
1508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1509
1510         dev->dev_ops->asym_session_clear(dev, sess);
1511
1512         rte_cryptodev_trace_asym_session_clear(dev_id, sess);
1513         return 0;
1514 }
1515
1516 int
1517 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1518 {
1519         uint8_t i;
1520         struct rte_mempool *sess_mp;
1521
1522         if (sess == NULL)
1523                 return -EINVAL;
1524
1525         /* Check that all device private data has been freed */
1526         for (i = 0; i < sess->nb_drivers; i++) {
1527                 if (sess->sess_data[i].refcnt != 0)
1528                         return -EBUSY;
1529         }
1530
1531         /* Return session to mempool */
1532         sess_mp = rte_mempool_from_obj(sess);
1533         rte_mempool_put(sess_mp, sess);
1534
1535         rte_cryptodev_trace_sym_session_free(sess);
1536         return 0;
1537 }
1538
1539 int
1540 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1541 {
1542         uint8_t i;
1543         void *sess_priv;
1544         struct rte_mempool *sess_mp;
1545
1546         if (sess == NULL)
1547                 return -EINVAL;
1548
1549         /* Check that all device private data has been freed */
1550         for (i = 0; i < nb_drivers; i++) {
1551                 sess_priv = get_asym_session_private_data(sess, i);
1552                 if (sess_priv != NULL)
1553                         return -EBUSY;
1554         }
1555
1556         /* Return session to mempool */
1557         sess_mp = rte_mempool_from_obj(sess);
1558         rte_mempool_put(sess_mp, sess);
1559
1560         rte_cryptodev_trace_asym_session_free(sess);
1561         return 0;
1562 }
1563
1564 unsigned int
1565 rte_cryptodev_sym_get_header_session_size(void)
1566 {
1567         /*
1568          * Header contains pointers to the private data of all registered
1569          * drivers and all the information needed to safely clear
1570          * or free a session.
1571          */
1572         struct rte_cryptodev_sym_session s = {0};
1573
1574         s.nb_drivers = nb_drivers;
1575
1576         return (unsigned int)(sizeof(s) +
1577                         rte_cryptodev_sym_session_data_size(&s));
1578 }
1579
1580 unsigned int
1581 rte_cryptodev_sym_get_existing_header_session_size(
1582                 struct rte_cryptodev_sym_session *sess)
1583 {
1584         if (!sess)
1585                 return 0;
1586         else
1587                 return (unsigned int)(sizeof(*sess) +
1588                                 rte_cryptodev_sym_session_data_size(sess));
1589 }
1590
1591 unsigned int
1592 rte_cryptodev_asym_get_header_session_size(void)
1593 {
1594         /*
1595          * Header contains pointers to the private data
1596          * of all registered drivers, and a flag which
1597          * indicates presence of private data
1598          */
1599         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1600 }
1601
1602 unsigned int
1603 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1604 {
1605         struct rte_cryptodev *dev;
1606         unsigned int priv_sess_size;
1607
1608         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1609                 return 0;
1610
1611         dev = rte_cryptodev_pmd_get_dev(dev_id);
1612
1613         if (*dev->dev_ops->sym_session_get_size == NULL)
1614                 return 0;
1615
1616         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1617
1618         return priv_sess_size;
1619 }
1620
1621 unsigned int
1622 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1623 {
1624         struct rte_cryptodev *dev;
1625         unsigned int header_size = sizeof(void *) * nb_drivers;
1626         unsigned int priv_sess_size;
1627
1628         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1629                 return 0;
1630
1631         dev = rte_cryptodev_pmd_get_dev(dev_id);
1632
1633         if (*dev->dev_ops->asym_session_get_size == NULL)
1634                 return 0;
1635
1636         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1637         if (priv_sess_size < header_size)
1638                 return header_size;
1639
1640         return priv_sess_size;
1641
1642 }
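
/*
 * Sketch of how an application might combine the size helpers above when
 * dimensioning its session pools; this is an assumption about typical usage,
 * and the pool names, element counts and "user_data_sz" are placeholders.
 * One pool holds session headers, a second one holds per-device private
 * session data.
 *
 *	struct rte_mempool *hdr_pool = rte_cryptodev_sym_session_pool_create(
 *			"sess_hdr_pool", 1024,
 *			rte_cryptodev_sym_get_header_session_size() +
 *				user_data_sz,
 *			32, user_data_sz, rte_socket_id());
 *
 *	struct rte_mempool *priv_pool = rte_mempool_create("sess_priv_pool",
 *			1024,
 *			rte_cryptodev_sym_get_private_session_size(dev_id),
 *			32, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 */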
1643
1644 int
1645 rte_cryptodev_sym_session_set_user_data(
1646                                         struct rte_cryptodev_sym_session *sess,
1647                                         void *data,
1648                                         uint16_t size)
1649 {
1650         if (sess == NULL)
1651                 return -EINVAL;
1652
1653         if (sess->user_data_sz < size)
1654                 return -ENOMEM;
1655
1656         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1657         return 0;
1658 }
1659
1660 void *
1661 rte_cryptodev_sym_session_get_user_data(
1662                                         struct rte_cryptodev_sym_session *sess)
1663 {
1664         if (sess == NULL || sess->user_data_sz == 0)
1665                 return NULL;
1666
1667         return (void *)(sess->sess_data + sess->nb_drivers);
1668 }
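
/*
 * Usage sketch, assuming the session pool was created with a non-zero
 * user_data_size: the user-data area sits directly behind the per-driver
 * session data, so a set followed by a get yields a pointer into the session
 * header itself.  "struct app_ctx" is a placeholder type.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0)
 *		p = rte_cryptodev_sym_session_get_user_data(sess);
 */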
1669
1670 static inline void
1671 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
1672 {
1673         uint32_t i;
1674         for (i = 0; i < vec->num; i++)
1675                 vec->status[i] = errnum;
1676 }
1677
1678 uint32_t
1679 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1680         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1681         struct rte_crypto_sym_vec *vec)
1682 {
1683         struct rte_cryptodev *dev;
1684
1685         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1686                 sym_crypto_fill_status(vec, EINVAL);
1687                 return 0;
1688         }
1689
1690         dev = rte_cryptodev_pmd_get_dev(dev_id);
1691
1692         if (*dev->dev_ops->sym_cpu_process == NULL ||
1693                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
1694                 sym_crypto_fill_status(vec, ENOTSUP);
1695                 return 0;
1696         }
1697
1698         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1699 }
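
/*
 * Sketch of typical usage (an assumption, not an additional API): callers
 * normally check the RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO capability once at
 * setup time rather than relying on the per-call ENOTSUP status above.
 *
 *	struct rte_cryptodev_info info;
 *	uint32_t processed;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)
 *		processed = rte_cryptodev_sym_cpu_crypto_process(dev_id,
 *				sess, ofs, &vec);
 */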
1700
1701 /** Initialise rte_crypto_op mempool element */
1702 static void
1703 rte_crypto_op_init(struct rte_mempool *mempool,
1704                 void *opaque_arg,
1705                 void *_op_data,
1706                 __rte_unused unsigned i)
1707 {
1708         struct rte_crypto_op *op = _op_data;
1709         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1710
1711         memset(_op_data, 0, mempool->elt_size);
1712
1713         __rte_crypto_op_reset(op, type);
1714
1715         op->phys_addr = rte_mem_virt2iova(_op_data);
1716         op->mempool = mempool;
1717 }
1718
1719
1720 struct rte_mempool *
1721 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1722                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1723                 int socket_id)
1724 {
1725         struct rte_crypto_op_pool_private *priv;
1726
1727         unsigned elt_size = sizeof(struct rte_crypto_op) +
1728                         priv_size;
1729
1730         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1731                 elt_size += sizeof(struct rte_crypto_sym_op);
1732         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1733                 elt_size += sizeof(struct rte_crypto_asym_op);
1734         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1735                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1736                                     sizeof(struct rte_crypto_asym_op));
1737         } else {
1738                 CDEV_LOG_ERR("Invalid op_type");
1739                 return NULL;
1740         }
1741
1742         /* lookup mempool in case already allocated */
1743         struct rte_mempool *mp = rte_mempool_lookup(name);
1744
1745         if (mp != NULL) {
1746                 priv = (struct rte_crypto_op_pool_private *)
1747                                 rte_mempool_get_priv(mp);
1748
1749                 if (mp->elt_size != elt_size ||
1750                                 mp->cache_size < cache_size ||
1751                                 mp->size < nb_elts ||
1752                                 priv->priv_size < priv_size) {
1753                         mp = NULL;
1754                         CDEV_LOG_ERR("Mempool %s already exists but with "
1755                                         "incompatible parameters", name);
1756                         return NULL;
1757                 }
1758                 return mp;
1759         }
1760
1761         mp = rte_mempool_create(
1762                         name,
1763                         nb_elts,
1764                         elt_size,
1765                         cache_size,
1766                         sizeof(struct rte_crypto_op_pool_private),
1767                         NULL,
1768                         NULL,
1769                         rte_crypto_op_init,
1770                         &type,
1771                         socket_id,
1772                         0);
1773
1774         if (mp == NULL) {
1775                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1776                 return NULL;
1777         }
1778
1779         priv = (struct rte_crypto_op_pool_private *)
1780                         rte_mempool_get_priv(mp);
1781
1782         priv->priv_size = priv_size;
1783         priv->type = type;
1784
1785         return mp;
1786 }
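
/*
 * Illustrative sketch; the pool name, sizes and burst count are placeholders.
 * A symmetric op pool created here feeds rte_crypto_op_bulk_alloc() on the
 * data path before the burst is handed to rte_cryptodev_enqueue_burst().
 *
 *	struct rte_crypto_op *ops[32];
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create("crypto_ops",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *
 *	if (op_pool != NULL &&
 *	    rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, 32) == 32)
 *		attach sessions to ops[] and enqueue the burst
 */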
1787
1788 int
1789 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1790 {
1791         struct rte_cryptodev *dev = NULL;
1792         uint32_t i = 0;
1793
1794         if (name == NULL)
1795                 return -EINVAL;
1796
1797         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1798                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1799                                 "%s_%u", dev_name_prefix, i);
1800
1801                 if (ret < 0)
1802                         return ret;
1803
1804                 dev = rte_cryptodev_pmd_get_named_dev(name);
1805                 if (!dev)
1806                         return 0;
1807         }
1808
1809         return -1;
1810 }
1811
1812 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
1813
1814 static struct cryptodev_driver_list cryptodev_driver_list =
1815         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1816
1817 int
1818 rte_cryptodev_driver_id_get(const char *name)
1819 {
1820         struct cryptodev_driver *driver;
1821         const char *driver_name;
1822
1823         if (name == NULL) {
1824                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
1825                 return -1;
1826         }
1827
1828         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1829                 driver_name = driver->driver->name;
1830                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
1831                         return driver->id;
1832         }
1833         return -1;
1834 }
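
/*
 * Lookup sketch; the driver name below is only an example and may not be
 * present in a given build.  A negative return simply means no driver with
 * that name has registered itself via rte_cryptodev_allocate_driver().
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *	if (drv_id < 0)
 *		handle the missing driver
 */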
1835
1836 const char *
1837 rte_cryptodev_name_get(uint8_t dev_id)
1838 {
1839         struct rte_cryptodev *dev;
1840
1841         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1842                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1843                 return NULL;
1844         }
1845
1846         dev = rte_cryptodev_pmd_get_dev(dev_id);
1847         if (dev == NULL)
1848                 return NULL;
1849
1850         return dev->data->name;
1851 }
1852
1853 const char *
1854 rte_cryptodev_driver_name_get(uint8_t driver_id)
1855 {
1856         struct cryptodev_driver *driver;
1857
1858         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1859                 if (driver->id == driver_id)
1860                         return driver->driver->name;
1861         return NULL;
1862 }
1863
1864 uint8_t
1865 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1866                 const struct rte_driver *drv)
1867 {
1868         crypto_drv->driver = drv;
1869         crypto_drv->id = nb_drivers;
1870
1871         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1872
1873         return nb_drivers++;
1874 }
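
/*
 * PMDs do not usually call this function directly; registration normally
 * happens through the RTE_PMD_REGISTER_CRYPTO_DRIVER() constructor macro,
 * roughly as sketched below with placeholder identifiers for the PMD's own
 * symbols.
 *
 *	static struct cryptodev_driver my_crypto_drv;
 *	static uint8_t my_driver_id;
 *
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(my_crypto_drv, my_vdev_drv.driver,
 *			my_driver_id);
 */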