cryptodev: fix missing device id range checking
[dpdk.git] lib/librte_cryptodev/rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43
44 static uint8_t nb_drivers;
45
46 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
47
48 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
49
50 static struct rte_cryptodev_global cryptodev_globals = {
51                 .devs                   = rte_crypto_devices,
52                 .data                   = { NULL },
53                 .nb_devs                = 0
54 };
55
56 /* spinlock for crypto device callbacks */
57 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
58
59
60 /**
61  * The user application callback description.
62  *
63  * It contains the callback address registered by the user application,
64  * a pointer to the callback parameter, and the event type.
65  */
66 struct rte_cryptodev_callback {
67         TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
68         rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
69         void *cb_arg;                           /**< Parameter for callback */
70         enum rte_cryptodev_event_type event;    /**< Interrupt event type */
71         uint32_t active;                        /**< Callback is executing */
72 };
73
74 /**
75  * String identifiers for the crypto cipher algorithms.
76  * They can be used on the application command line.
77  */
78 const char *
79 rte_crypto_cipher_algorithm_strings[] = {
80         [RTE_CRYPTO_CIPHER_3DES_CBC]    = "3des-cbc",
81         [RTE_CRYPTO_CIPHER_3DES_ECB]    = "3des-ecb",
82         [RTE_CRYPTO_CIPHER_3DES_CTR]    = "3des-ctr",
83
84         [RTE_CRYPTO_CIPHER_AES_CBC]     = "aes-cbc",
85         [RTE_CRYPTO_CIPHER_AES_CTR]     = "aes-ctr",
86         [RTE_CRYPTO_CIPHER_AES_DOCSISBPI]       = "aes-docsisbpi",
87         [RTE_CRYPTO_CIPHER_AES_ECB]     = "aes-ecb",
88         [RTE_CRYPTO_CIPHER_AES_F8]      = "aes-f8",
89         [RTE_CRYPTO_CIPHER_AES_XTS]     = "aes-xts",
90
91         [RTE_CRYPTO_CIPHER_ARC4]        = "arc4",
92
93         [RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
94         [RTE_CRYPTO_CIPHER_DES_DOCSISBPI]       = "des-docsisbpi",
95
96         [RTE_CRYPTO_CIPHER_NULL]        = "null",
97
98         [RTE_CRYPTO_CIPHER_KASUMI_F8]   = "kasumi-f8",
99         [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
100         [RTE_CRYPTO_CIPHER_ZUC_EEA3]    = "zuc-eea3"
101 };
102
103 /**
104  * String identifiers for the crypto cipher operations.
105  * They can be used on the application command line.
106  */
107 const char *
108 rte_crypto_cipher_operation_strings[] = {
109                 [RTE_CRYPTO_CIPHER_OP_ENCRYPT]  = "encrypt",
110                 [RTE_CRYPTO_CIPHER_OP_DECRYPT]  = "decrypt"
111 };
112
113 /**
114  * String identifiers for the crypto auth algorithms.
115  * They can be used on the application command line.
116  */
117 const char *
118 rte_crypto_auth_algorithm_strings[] = {
119         [RTE_CRYPTO_AUTH_AES_CBC_MAC]   = "aes-cbc-mac",
120         [RTE_CRYPTO_AUTH_AES_CMAC]      = "aes-cmac",
121         [RTE_CRYPTO_AUTH_AES_GMAC]      = "aes-gmac",
122         [RTE_CRYPTO_AUTH_AES_XCBC_MAC]  = "aes-xcbc-mac",
123
124         [RTE_CRYPTO_AUTH_MD5]           = "md5",
125         [RTE_CRYPTO_AUTH_MD5_HMAC]      = "md5-hmac",
126
127         [RTE_CRYPTO_AUTH_NULL]          = "null",
128
129         [RTE_CRYPTO_AUTH_SHA1]          = "sha1",
130         [RTE_CRYPTO_AUTH_SHA1_HMAC]     = "sha1-hmac",
131
132         [RTE_CRYPTO_AUTH_SHA224]        = "sha2-224",
133         [RTE_CRYPTO_AUTH_SHA224_HMAC]   = "sha2-224-hmac",
134         [RTE_CRYPTO_AUTH_SHA256]        = "sha2-256",
135         [RTE_CRYPTO_AUTH_SHA256_HMAC]   = "sha2-256-hmac",
136         [RTE_CRYPTO_AUTH_SHA384]        = "sha2-384",
137         [RTE_CRYPTO_AUTH_SHA384_HMAC]   = "sha2-384-hmac",
138         [RTE_CRYPTO_AUTH_SHA512]        = "sha2-512",
139         [RTE_CRYPTO_AUTH_SHA512_HMAC]   = "sha2-512-hmac",
140
141         [RTE_CRYPTO_AUTH_KASUMI_F9]     = "kasumi-f9",
142         [RTE_CRYPTO_AUTH_SNOW3G_UIA2]   = "snow3g-uia2",
143         [RTE_CRYPTO_AUTH_ZUC_EIA3]      = "zuc-eia3"
144 };
145
146 /**
147  * String identifiers for the crypto AEAD algorithms.
148  * They can be used on the application command line.
149  */
150 const char *
151 rte_crypto_aead_algorithm_strings[] = {
152         [RTE_CRYPTO_AEAD_AES_CCM]       = "aes-ccm",
153         [RTE_CRYPTO_AEAD_AES_GCM]       = "aes-gcm",
154 };
155
156 /**
157  * String identifiers for the crypto AEAD operations.
158  * They can be used on the application command line.
159  */
160 const char *
161 rte_crypto_aead_operation_strings[] = {
162         [RTE_CRYPTO_AEAD_OP_ENCRYPT]    = "encrypt",
163         [RTE_CRYPTO_AEAD_OP_DECRYPT]    = "decrypt"
164 };
165
166 /**
167  * Asymmetric crypto transform operation strings identifiers.
168  */
169 const char *rte_crypto_asym_xform_strings[] = {
170         [RTE_CRYPTO_ASYM_XFORM_NONE]    = "none",
171         [RTE_CRYPTO_ASYM_XFORM_RSA]     = "rsa",
172         [RTE_CRYPTO_ASYM_XFORM_MODEX]   = "modexp",
173         [RTE_CRYPTO_ASYM_XFORM_MODINV]  = "modinv",
174         [RTE_CRYPTO_ASYM_XFORM_DH]      = "dh",
175         [RTE_CRYPTO_ASYM_XFORM_DSA]     = "dsa",
176         [RTE_CRYPTO_ASYM_XFORM_ECDSA]   = "ecdsa",
177         [RTE_CRYPTO_ASYM_XFORM_ECPM]    = "ecpm",
178 };
179
180 /**
181  * Asymmetric crypto operation strings identifiers.
182  */
183 const char *rte_crypto_asym_op_strings[] = {
184         [RTE_CRYPTO_ASYM_OP_ENCRYPT]    = "encrypt",
185         [RTE_CRYPTO_ASYM_OP_DECRYPT]    = "decrypt",
186         [RTE_CRYPTO_ASYM_OP_SIGN]       = "sign",
187         [RTE_CRYPTO_ASYM_OP_VERIFY]     = "verify",
188         [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]       = "priv_key_generate",
189         [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
190         [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
191 };
192
193 /**
194  * The structure stored in the session mempool's private data area.
195  */
196 struct rte_cryptodev_sym_session_pool_private_data {
197         uint16_t nb_drivers;
198         /**< number of elements in sess_data array */
199         uint16_t user_data_sz;
200         /**< session user data will be placed after sess_data */
201 };
202
203 int
204 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
205                 const char *algo_string)
206 {
207         unsigned int i;
208
209         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
210                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
211                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
212                         return 0;
213                 }
214         }
215
216         /* Invalid string */
217         return -1;
218 }
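
/*
 * Illustrative usage (editorial sketch, not part of the library): resolving
 * a cipher name taken from an application command line into its enum value.
 * The input string "aes-cbc" is only an example.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 */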
219
220 int
221 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
222                 const char *algo_string)
223 {
224         unsigned int i;
225
226         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
227                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
228                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
229                         return 0;
230                 }
231         }
232
233         /* Invalid string */
234         return -1;
235 }
236
237 int
238 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
239                 const char *algo_string)
240 {
241         unsigned int i;
242
243         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
244                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
245                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
246                         return 0;
247                 }
248         }
249
250         /* Invalid string */
251         return -1;
252 }
253
254 int
255 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
256                 const char *xform_string)
257 {
258         unsigned int i;
259
260         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
261                 if (strcmp(xform_string,
262                         rte_crypto_asym_xform_strings[i]) == 0) {
263                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
264                         return 0;
265                 }
266         }
267
268         /* Invalid string */
269         return -1;
270 }
271
272 /**
273  * String identifiers for the crypto auth operations.
274  * They can be used on the application command line.
275  */
276 const char *
277 rte_crypto_auth_operation_strings[] = {
278                 [RTE_CRYPTO_AUTH_OP_VERIFY]     = "verify",
279                 [RTE_CRYPTO_AUTH_OP_GENERATE]   = "generate"
280 };
281
282 const struct rte_cryptodev_symmetric_capability *
283 rte_cryptodev_sym_capability_get(uint8_t dev_id,
284                 const struct rte_cryptodev_sym_capability_idx *idx)
285 {
286         const struct rte_cryptodev_capabilities *capability;
287         struct rte_cryptodev_info dev_info;
288         int i = 0;
289
290         rte_cryptodev_info_get(dev_id, &dev_info);
291
292         while ((capability = &dev_info.capabilities[i++])->op !=
293                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
294                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
295                         continue;
296
297                 if (capability->sym.xform_type != idx->type)
298                         continue;
299
300                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
301                         capability->sym.auth.algo == idx->algo.auth)
302                         return &capability->sym;
303
304                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
305                         capability->sym.cipher.algo == idx->algo.cipher)
306                         return &capability->sym;
307
308                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
309                                 capability->sym.aead.algo == idx->algo.aead)
310                         return &capability->sym;
311         }
312
313         return NULL;
314
315 }
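
/*
 * Illustrative sketch (not part of the library): checking whether a device
 * supports AES-CBC with a 16-byte key and a 16-byte IV. The dev_id variable
 * and the chosen sizes are assumptions for the example.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		printf("AES-CBC-128 not supported on device %u\n", dev_id);
 */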
316
317 static int
318 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
319 {
320         unsigned int next_size;
321
322         /* Check lower/upper bounds */
323         if (size < range->min)
324                 return -1;
325
326         if (size > range->max)
327                 return -1;
328
329         /* If range is actually only one value, size is correct */
330         if (range->increment == 0)
331                 return 0;
332
333         /* Check if value is one of the supported sizes */
334         for (next_size = range->min; next_size <= range->max;
335                         next_size += range->increment)
336                 if (size == next_size)
337                         return 0;
338
339         return -1;
340 }
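
/*
 * Worked example of the range semantics above (editorial note): with
 * min = 16, max = 32 and increment = 8 the accepted sizes are 16, 24 and 32;
 * increment = 0 leaves only the bound checks, which PMDs use when min == max
 * describes a single supported value.
 */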
341
342 const struct rte_cryptodev_asymmetric_xform_capability *
343 rte_cryptodev_asym_capability_get(uint8_t dev_id,
344                 const struct rte_cryptodev_asym_capability_idx *idx)
345 {
346         const struct rte_cryptodev_capabilities *capability;
347         struct rte_cryptodev_info dev_info;
348         unsigned int i = 0;
349
350         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
351         rte_cryptodev_info_get(dev_id, &dev_info);
352
353         while ((capability = &dev_info.capabilities[i++])->op !=
354                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
355                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
356                         continue;
357
358                 if (capability->asym.xform_capa.xform_type == idx->type)
359                         return &capability->asym.xform_capa;
360         }
361         return NULL;
362 }
363
364 int
365 rte_cryptodev_sym_capability_check_cipher(
366                 const struct rte_cryptodev_symmetric_capability *capability,
367                 uint16_t key_size, uint16_t iv_size)
368 {
369         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
370                 return -1;
371
372         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
373                 return -1;
374
375         return 0;
376 }
377
378 int
379 rte_cryptodev_sym_capability_check_auth(
380                 const struct rte_cryptodev_symmetric_capability *capability,
381                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
382 {
383         if (param_range_check(key_size, &capability->auth.key_size) != 0)
384                 return -1;
385
386         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
387                 return -1;
388
389         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
390                 return -1;
391
392         return 0;
393 }
394
395 int
396 rte_cryptodev_sym_capability_check_aead(
397                 const struct rte_cryptodev_symmetric_capability *capability,
398                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
399                 uint16_t iv_size)
400 {
401         if (param_range_check(key_size, &capability->aead.key_size) != 0)
402                 return -1;
403
404         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
405                 return -1;
406
407         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
408                 return -1;
409
410         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
411                 return -1;
412
413         return 0;
414 }
415 int
416 rte_cryptodev_asym_xform_capability_check_optype(
417         const struct rte_cryptodev_asymmetric_xform_capability *capability,
418         enum rte_crypto_asym_op_type op_type)
419 {
420         if (capability->op_types & (1 << op_type))
421                 return 1;
422
423         return 0;
424 }
425
426 int
427 rte_cryptodev_asym_xform_capability_check_modlen(
428         const struct rte_cryptodev_asymmetric_xform_capability *capability,
429         uint16_t modlen)
430 {
431         /* no need to check the limits if min or max is 0 */
432         if (capability->modlen.min != 0) {
433                 if (modlen < capability->modlen.min)
434                         return -1;
435         }
436
437         if (capability->modlen.max != 0) {
438                 if (modlen > capability->modlen.max)
439                         return -1;
440         }
441
442         /* in any case, check that modlen is a multiple of the increment */
443         if (capability->modlen.increment != 0) {
444                 if (modlen % (capability->modlen.increment))
445                         return -1;
446         }
447
448         return 0;
449 }
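
/*
 * Illustrative sketch (not part of the library): verifying that a device can
 * perform modular exponentiation with a 256-byte (2048-bit) modulus. The
 * dev_id variable and the modulus length are assumptions for the example.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap;
 *
 *	cap = rte_cryptodev_asym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_asym_xform_capability_check_modlen(cap, 256) != 0)
 *		printf("2048-bit modexp not supported on device %u\n", dev_id);
 */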
450
451
452 const char *
453 rte_cryptodev_get_feature_name(uint64_t flag)
454 {
455         switch (flag) {
456         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
457                 return "SYMMETRIC_CRYPTO";
458         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
459                 return "ASYMMETRIC_CRYPTO";
460         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
461                 return "SYM_OPERATION_CHAINING";
462         case RTE_CRYPTODEV_FF_CPU_SSE:
463                 return "CPU_SSE";
464         case RTE_CRYPTODEV_FF_CPU_AVX:
465                 return "CPU_AVX";
466         case RTE_CRYPTODEV_FF_CPU_AVX2:
467                 return "CPU_AVX2";
468         case RTE_CRYPTODEV_FF_CPU_AVX512:
469                 return "CPU_AVX512";
470         case RTE_CRYPTODEV_FF_CPU_AESNI:
471                 return "CPU_AESNI";
472         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
473                 return "HW_ACCELERATED";
474         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
475                 return "IN_PLACE_SGL";
476         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
477                 return "OOP_SGL_IN_SGL_OUT";
478         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
479                 return "OOP_SGL_IN_LB_OUT";
480         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
481                 return "OOP_LB_IN_SGL_OUT";
482         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
483                 return "OOP_LB_IN_LB_OUT";
484         case RTE_CRYPTODEV_FF_CPU_NEON:
485                 return "CPU_NEON";
486         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
487                 return "CPU_ARM_CE";
488         case RTE_CRYPTODEV_FF_SECURITY:
489                 return "SECURITY_PROTOCOL";
490         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
491                 return "RSA_PRIV_OP_KEY_EXP";
492         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
493                 return "RSA_PRIV_OP_KEY_QT";
494         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
495                 return "DIGEST_ENCRYPTED";
496         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
497                 return "SYM_CPU_CRYPTO";
498         default:
499                 return NULL;
500         }
501 }
502
503 struct rte_cryptodev *
504 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
505 {
506         return &cryptodev_globals.devs[dev_id];
507 }
508
509 struct rte_cryptodev *
510 rte_cryptodev_pmd_get_named_dev(const char *name)
511 {
512         struct rte_cryptodev *dev;
513         unsigned int i;
514
515         if (name == NULL)
516                 return NULL;
517
518         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
519                 dev = &cryptodev_globals.devs[i];
520
521                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
522                                 (strcmp(dev->data->name, name) == 0))
523                         return dev;
524         }
525
526         return NULL;
527 }
528
529 static inline uint8_t
530 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
531 {
532         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
533                         rte_crypto_devices[dev_id].data == NULL)
534                 return 0;
535
536         return 1;
537 }
538
539 unsigned int
540 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
541 {
542         struct rte_cryptodev *dev = NULL;
543
544         if (!rte_cryptodev_is_valid_device_data(dev_id))
545                 return 0;
546
547         dev = rte_cryptodev_pmd_get_dev(dev_id);
548         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
549                 return 0;
550         else
551                 return 1;
552 }
553
554
555 int
556 rte_cryptodev_get_dev_id(const char *name)
557 {
558         unsigned i;
559
560         if (name == NULL)
561                 return -1;
562
563         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
564                 if (!rte_cryptodev_is_valid_device_data(i))
565                         continue;
566                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
567                                 == 0) &&
568                                 (cryptodev_globals.devs[i].attached ==
569                                                 RTE_CRYPTODEV_ATTACHED))
570                         return i;
571         }
572
573         return -1;
574 }
575
576 uint8_t
577 rte_cryptodev_count(void)
578 {
579         return cryptodev_globals.nb_devs;
580 }
581
582 uint8_t
583 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
584 {
585         uint8_t i, dev_count = 0;
586
587         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
588                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
589                         cryptodev_globals.devs[i].attached ==
590                                         RTE_CRYPTODEV_ATTACHED)
591                         dev_count++;
592
593         return dev_count;
594 }
595
596 uint8_t
597 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
598         uint8_t nb_devices)
599 {
600         uint8_t i, count = 0;
601         struct rte_cryptodev *devs = cryptodev_globals.devs;
602
603         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
604                 if (!rte_cryptodev_is_valid_device_data(i))
605                         continue;
606
607                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
608                         int cmp;
609
610                         cmp = strncmp(devs[i].device->driver->name,
611                                         driver_name,
612                                         strlen(driver_name) + 1);
613
614                         if (cmp == 0)
615                                 devices[count++] = devs[i].data->dev_id;
616                 }
617         }
618
619         return count;
620 }
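
/*
 * Illustrative sketch (not part of the library): collecting the identifiers
 * of all attached devices bound to one driver. The driver name
 * "crypto_aesni_mb" is only an example.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u belongs to crypto_aesni_mb\n", ids[i]);
 */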
621
622 void *
623 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
624 {
625         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
626                         (rte_crypto_devices[dev_id].feature_flags &
627                         RTE_CRYPTODEV_FF_SECURITY))
628                 return rte_crypto_devices[dev_id].security_ctx;
629
630         return NULL;
631 }
632
633 int
634 rte_cryptodev_socket_id(uint8_t dev_id)
635 {
636         struct rte_cryptodev *dev;
637
638         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
639                 return -1;
640
641         dev = rte_cryptodev_pmd_get_dev(dev_id);
642
643         return dev->data->socket_id;
644 }
645
646 static inline int
647 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
648                 int socket_id)
649 {
650         char mz_name[RTE_MEMZONE_NAMESIZE];
651         const struct rte_memzone *mz;
652         int n;
653
654         /* generate memzone name */
655         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
656         if (n >= (int)sizeof(mz_name))
657                 return -EINVAL;
658
659         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
660                 mz = rte_memzone_reserve(mz_name,
661                                 sizeof(struct rte_cryptodev_data),
662                                 socket_id, 0);
663         } else
664                 mz = rte_memzone_lookup(mz_name);
665
666         if (mz == NULL)
667                 return -ENOMEM;
668
669         *data = mz->addr;
670         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
671                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
672
673         return 0;
674 }
675
676 static inline int
677 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
678 {
679         char mz_name[RTE_MEMZONE_NAMESIZE];
680         const struct rte_memzone *mz;
681         int n;
682
683         /* generate memzone name */
684         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
685         if (n >= (int)sizeof(mz_name))
686                 return -EINVAL;
687
688         mz = rte_memzone_lookup(mz_name);
689         if (mz == NULL)
690                 return -ENOMEM;
691
692         RTE_ASSERT(*data == mz->addr);
693         *data = NULL;
694
695         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
696                 return rte_memzone_free(mz);
697
698         return 0;
699 }
700
701 static uint8_t
702 rte_cryptodev_find_free_device_index(void)
703 {
704         uint8_t dev_id;
705
706         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
707                 if (rte_crypto_devices[dev_id].attached ==
708                                 RTE_CRYPTODEV_DETACHED)
709                         return dev_id;
710         }
711         return RTE_CRYPTO_MAX_DEVS;
712 }
713
714 struct rte_cryptodev *
715 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
716 {
717         struct rte_cryptodev *cryptodev;
718         uint8_t dev_id;
719
720         if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
721                 CDEV_LOG_ERR("Crypto device with name %s already "
722                                 "allocated!", name);
723                 return NULL;
724         }
725
726         dev_id = rte_cryptodev_find_free_device_index();
727         if (dev_id == RTE_CRYPTO_MAX_DEVS) {
728                 CDEV_LOG_ERR("Reached maximum number of crypto devices");
729                 return NULL;
730         }
731
732         cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
733
734         if (cryptodev->data == NULL) {
735                 struct rte_cryptodev_data **cryptodev_data =
736                                 &cryptodev_globals.data[dev_id];
737
738                 int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
739                                 socket_id);
740
741                 if (retval < 0 || *cryptodev_data == NULL)
742                         return NULL;
743
744                 cryptodev->data = *cryptodev_data;
745
746                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
747                         strlcpy(cryptodev->data->name, name,
748                                 RTE_CRYPTODEV_NAME_MAX_LEN);
749
750                         cryptodev->data->dev_id = dev_id;
751                         cryptodev->data->socket_id = socket_id;
752                         cryptodev->data->dev_started = 0;
753                 }
754
755                 /* init user callbacks */
756                 TAILQ_INIT(&(cryptodev->link_intr_cbs));
757
758                 cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
759
760                 cryptodev_globals.nb_devs++;
761         }
762
763         return cryptodev;
764 }
765
766 int
767 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
768 {
769         int ret;
770         uint8_t dev_id;
771
772         if (cryptodev == NULL)
773                 return -EINVAL;
774
775         dev_id = cryptodev->data->dev_id;
776
777         /* Close device only if device operations have been set */
778         if (cryptodev->dev_ops) {
779                 ret = rte_cryptodev_close(dev_id);
780                 if (ret < 0)
781                         return ret;
782         }
783
784         ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
785         if (ret < 0)
786                 return ret;
787
788         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
789         cryptodev_globals.nb_devs--;
790         return 0;
791 }
792
793 uint16_t
794 rte_cryptodev_queue_pair_count(uint8_t dev_id)
795 {
796         struct rte_cryptodev *dev;
797
798         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
799                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
800                 return 0;
801         }
802
803         dev = &rte_crypto_devices[dev_id];
804         return dev->data->nb_queue_pairs;
805 }
806
807 static int
808 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
809                 int socket_id)
810 {
811         struct rte_cryptodev_info dev_info;
812         void **qp;
813         unsigned i;
814
815         if ((dev == NULL) || (nb_qpairs < 1)) {
816                 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
817                                                         dev, nb_qpairs);
818                 return -EINVAL;
819         }
820
821         CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
822                         nb_qpairs, dev->data->dev_id);
823
824         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
825
826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
827         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
828
829         if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
830                 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
831                                 nb_qpairs, dev->data->dev_id);
832                 return -EINVAL;
833         }
834
835         if (dev->data->queue_pairs == NULL) { /* first time configuration */
836                 dev->data->queue_pairs = rte_zmalloc_socket(
837                                 "cryptodev->queue_pairs",
838                                 sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
839                                 RTE_CACHE_LINE_SIZE, socket_id);
840
841                 if (dev->data->queue_pairs == NULL) {
842                         dev->data->nb_queue_pairs = 0;
843                         CDEV_LOG_ERR("failed to get memory for qp meta data, "
844                                                         "nb_queues %u",
845                                                         nb_qpairs);
846                         return -(ENOMEM);
847                 }
848         } else { /* re-configure */
849                 int ret;
850                 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
851
852                 qp = dev->data->queue_pairs;
853
854                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
855                                 -ENOTSUP);
856
857                 for (i = nb_qpairs; i < old_nb_queues; i++) {
858                         ret = (*dev->dev_ops->queue_pair_release)(dev, i);
859                         if (ret < 0)
860                                 return ret;
861                 }
862
863                 qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
864                                 RTE_CACHE_LINE_SIZE);
865                 if (qp == NULL) {
866                         CDEV_LOG_ERR("failed to realloc qp meta data,"
867                                                 " nb_queues %u", nb_qpairs);
868                         return -(ENOMEM);
869                 }
870
871                 if (nb_qpairs > old_nb_queues) {
872                         uint16_t new_qs = nb_qpairs - old_nb_queues;
873
874                         memset(qp + old_nb_queues, 0,
875                                 sizeof(qp[0]) * new_qs);
876                 }
877
878                 dev->data->queue_pairs = qp;
879
880         }
881         dev->data->nb_queue_pairs = nb_qpairs;
882         return 0;
883 }
884
885 int
886 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
887 {
888         struct rte_cryptodev *dev;
889         int diag;
890
891         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
892                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
893                 return -EINVAL;
894         }
895
896         dev = &rte_crypto_devices[dev_id];
897
898         if (dev->data->dev_started) {
899                 CDEV_LOG_ERR(
900                     "device %d must be stopped to allow configuration", dev_id);
901                 return -EBUSY;
902         }
903
904         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
905
906         /* Setup new number of queue pairs and reconfigure device. */
907         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
908                         config->socket_id);
909         if (diag != 0) {
910                 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
911                                 dev_id, diag);
912                 return diag;
913         }
914
915         return (*dev->dev_ops->dev_configure)(dev, config);
916 }
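
/*
 * Illustrative bring-up sketch (not part of the library): a device has to be
 * configured and have its queue pairs set up before it is started. The queue
 * pair count is an assumption and error handling is omitted.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	... set up each queue pair with rte_cryptodev_queue_pair_setup() ...
 *	rte_cryptodev_start(dev_id);
 */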
917
918
919 int
920 rte_cryptodev_start(uint8_t dev_id)
921 {
922         struct rte_cryptodev *dev;
923         int diag;
924
925         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
926
927         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
928                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
929                 return -EINVAL;
930         }
931
932         dev = &rte_crypto_devices[dev_id];
933
934         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
935
936         if (dev->data->dev_started != 0) {
937                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
938                         dev_id);
939                 return 0;
940         }
941
942         diag = (*dev->dev_ops->dev_start)(dev);
943         if (diag == 0)
944                 dev->data->dev_started = 1;
945         else
946                 return diag;
947
948         return 0;
949 }
950
951 void
952 rte_cryptodev_stop(uint8_t dev_id)
953 {
954         struct rte_cryptodev *dev;
955
956         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
957                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
958                 return;
959         }
960
961         dev = &rte_crypto_devices[dev_id];
962
963         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
964
965         if (dev->data->dev_started == 0) {
966                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
967                         dev_id);
968                 return;
969         }
970
971         (*dev->dev_ops->dev_stop)(dev);
972         dev->data->dev_started = 0;
973 }
974
975 int
976 rte_cryptodev_close(uint8_t dev_id)
977 {
978         struct rte_cryptodev *dev;
979         int retval;
980
981         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
982                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
983                 return -1;
984         }
985
986         dev = &rte_crypto_devices[dev_id];
987
988         /* Device must be stopped before it can be closed */
989         if (dev->data->dev_started == 1) {
990                 CDEV_LOG_ERR("Device %u must be stopped before closing",
991                                 dev_id);
992                 return -EBUSY;
993         }
994
995         /* We can't close the device if there are outstanding sessions in use */
996         if (dev->data->session_pool != NULL) {
997                 if (!rte_mempool_full(dev->data->session_pool)) {
998                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
999                                         "has sessions still in use, free "
1000                                         "all sessions before calling close",
1001                                         (unsigned)dev_id);
1002                         return -EBUSY;
1003                 }
1004         }
1005
1006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1007         retval = (*dev->dev_ops->dev_close)(dev);
1008
1009         if (retval < 0)
1010                 return retval;
1011
1012         return 0;
1013 }
1014
1015 int
1016 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1017                 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1018
1019 {
1020         struct rte_cryptodev *dev;
1021
1022         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1023                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1024                 return -EINVAL;
1025         }
1026
1027         dev = &rte_crypto_devices[dev_id];
1028         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1029                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1030                 return -EINVAL;
1031         }
1032
1033         if (!qp_conf) {
1034                 CDEV_LOG_ERR("qp_conf cannot be NULL");
1035                 return -EINVAL;
1036         }
1037
1038         if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1039                         (!qp_conf->mp_session && qp_conf->mp_session_private)) {
1040                 CDEV_LOG_ERR("Invalid mempools");
1041                 return -EINVAL;
1042         }
1043
1044         if (qp_conf->mp_session) {
1045                 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1046                 uint32_t obj_size = qp_conf->mp_session->elt_size;
1047                 uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1048                 struct rte_cryptodev_sym_session s = {0};
1049
1050                 pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1051                 if (!pool_priv || qp_conf->mp_session->private_data_size <
1052                                 sizeof(*pool_priv)) {
1053                         CDEV_LOG_ERR("Invalid mempool");
1054                         return -EINVAL;
1055                 }
1056
1057                 s.nb_drivers = pool_priv->nb_drivers;
1058                 s.user_data_sz = pool_priv->user_data_sz;
1059
1060                 if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1061                         obj_size) || (s.nb_drivers <= dev->driver_id) ||
1062                         rte_cryptodev_sym_get_private_session_size(dev_id) >
1063                                 obj_priv_size) {
1064                         CDEV_LOG_ERR("Invalid mempool");
1065                         return -EINVAL;
1066                 }
1067         }
1068
1069         if (dev->data->dev_started) {
1070                 CDEV_LOG_ERR(
1071                     "device %d must be stopped to allow configuration", dev_id);
1072                 return -EBUSY;
1073         }
1074
1075         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1076
1077         return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1078                         socket_id);
1079 }
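
/*
 * Illustrative sketch (not part of the library): setting up queue pair 0.
 * The descriptor count and the two mempools (sess_mp from
 * rte_cryptodev_sym_session_pool_create(), sess_priv_mp sized with
 * rte_cryptodev_sym_get_private_session_size()) are assumptions; both
 * mempools must be supplied together, as checked above.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		printf("queue pair 0 setup failed on device %u\n", dev_id);
 */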
1080
1081
1082 int
1083 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1084 {
1085         struct rte_cryptodev *dev;
1086
1087         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1088                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1089                 return -ENODEV;
1090         }
1091
1092         if (stats == NULL) {
1093                 CDEV_LOG_ERR("Invalid stats ptr");
1094                 return -EINVAL;
1095         }
1096
1097         dev = &rte_crypto_devices[dev_id];
1098         memset(stats, 0, sizeof(*stats));
1099
1100         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1101         (*dev->dev_ops->stats_get)(dev, stats);
1102         return 0;
1103 }
1104
1105 void
1106 rte_cryptodev_stats_reset(uint8_t dev_id)
1107 {
1108         struct rte_cryptodev *dev;
1109
1110         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1111                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1112                 return;
1113         }
1114
1115         dev = &rte_crypto_devices[dev_id];
1116
1117         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1118         (*dev->dev_ops->stats_reset)(dev);
1119 }
1120
1121
1122 void
1123 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1124 {
1125         struct rte_cryptodev *dev;
1126
1127         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1128                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1129                 return;
1130         }
1131
1132         dev = &rte_crypto_devices[dev_id];
1133
1134         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1135
1136         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1137         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1138
1139         dev_info->driver_name = dev->device->driver->name;
1140         dev_info->device = dev->device;
1141 }
1142
1143
1144 int
1145 rte_cryptodev_callback_register(uint8_t dev_id,
1146                         enum rte_cryptodev_event_type event,
1147                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1148 {
1149         struct rte_cryptodev *dev;
1150         struct rte_cryptodev_callback *user_cb;
1151
1152         if (!cb_fn)
1153                 return -EINVAL;
1154
1155         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1156                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1157                 return -EINVAL;
1158         }
1159
1160         dev = &rte_crypto_devices[dev_id];
1161         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1162
1163         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1164                 if (user_cb->cb_fn == cb_fn &&
1165                         user_cb->cb_arg == cb_arg &&
1166                         user_cb->event == event) {
1167                         break;
1168                 }
1169         }
1170
1171         /* create a new callback. */
1172         if (user_cb == NULL) {
1173                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1174                                 sizeof(struct rte_cryptodev_callback), 0);
1175                 if (user_cb != NULL) {
1176                         user_cb->cb_fn = cb_fn;
1177                         user_cb->cb_arg = cb_arg;
1178                         user_cb->event = event;
1179                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1180                 }
1181         }
1182
1183         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1184         return (user_cb == NULL) ? -ENOMEM : 0;
1185 }
1186
1187 int
1188 rte_cryptodev_callback_unregister(uint8_t dev_id,
1189                         enum rte_cryptodev_event_type event,
1190                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1191 {
1192         int ret;
1193         struct rte_cryptodev *dev;
1194         struct rte_cryptodev_callback *cb, *next;
1195
1196         if (!cb_fn)
1197                 return -EINVAL;
1198
1199         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1200                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1201                 return -EINVAL;
1202         }
1203
1204         dev = &rte_crypto_devices[dev_id];
1205         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1206
1207         ret = 0;
1208         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1209
1210                 next = TAILQ_NEXT(cb, next);
1211
1212                 if (cb->cb_fn != cb_fn || cb->event != event ||
1213                                 (cb->cb_arg != (void *)-1 &&
1214                                 cb->cb_arg != cb_arg))
1215                         continue;
1216
1217                 /*
1218                  * if this callback is not executing right now,
1219                  * then remove it.
1220                  */
1221                 if (cb->active == 0) {
1222                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1223                         rte_free(cb);
1224                 } else {
1225                         ret = -EAGAIN;
1226                 }
1227         }
1228
1229         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1230         return ret;
1231 }
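
/*
 * Illustrative sketch (not part of the library): registering an application
 * callback for error events. The callback body is an assumption; cb_arg is
 * handed back to the callback unchanged.
 *
 *	static void
 *	crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		printf("device %u raised event %d (arg %p)\n",
 *				dev_id, event, cb_arg);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			crypto_event_cb, NULL);
 */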
1232
1233 void
1234 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1235         enum rte_cryptodev_event_type event)
1236 {
1237         struct rte_cryptodev_callback *cb_lst;
1238         struct rte_cryptodev_callback dev_cb;
1239
1240         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1241         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1242                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1243                         continue;
1244                 dev_cb = *cb_lst;
1245                 cb_lst->active = 1;
1246                 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1247                 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1248                                                 dev_cb.cb_arg);
1249                 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1250                 cb_lst->active = 0;
1251         }
1252         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1253 }
1254
1255
1256 int
1257 rte_cryptodev_sym_session_init(uint8_t dev_id,
1258                 struct rte_cryptodev_sym_session *sess,
1259                 struct rte_crypto_sym_xform *xforms,
1260                 struct rte_mempool *mp)
1261 {
1262         struct rte_cryptodev *dev;
1263         uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1264                         dev_id);
1265         uint8_t index;
1266         int ret;
1267
1268         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1269                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1270                 return -EINVAL;
1271         }
1272
1273         dev = rte_cryptodev_pmd_get_dev(dev_id);
1274
1275         if (sess == NULL || xforms == NULL || dev == NULL)
1276                 return -EINVAL;
1277
1278         if (mp->elt_size < sess_priv_sz)
1279                 return -EINVAL;
1280
1281         index = dev->driver_id;
1282         if (index >= sess->nb_drivers)
1283                 return -EINVAL;
1284
1285         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1286
1287         if (sess->sess_data[index].refcnt == 0) {
1288                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1289                                                         sess, mp);
1290                 if (ret < 0) {
1291                         CDEV_LOG_ERR(
1292                                 "dev_id %d failed to configure session details",
1293                                 dev_id);
1294                         return ret;
1295                 }
1296         }
1297
1298         sess->sess_data[index].refcnt++;
1299         return 0;
1300 }
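
/*
 * Illustrative sketch (not part of the library) of the symmetric session
 * life cycle. The session header comes from a pool created with
 * rte_cryptodev_sym_session_pool_create(); the per-device private data comes
 * from a second mempool whose element size covers
 * rte_cryptodev_sym_get_private_session_size(). The xform, sess_mp and
 * sess_priv_mp variables are assumptions and error handling is omitted.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp);
 *	... enqueue/dequeue crypto operations that reference sess ...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */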
1301
1302 int
1303 rte_cryptodev_asym_session_init(uint8_t dev_id,
1304                 struct rte_cryptodev_asym_session *sess,
1305                 struct rte_crypto_asym_xform *xforms,
1306                 struct rte_mempool *mp)
1307 {
1308         struct rte_cryptodev *dev;
1309         uint8_t index;
1310         int ret;
1311
1312         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1313                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1314                 return -EINVAL;
1315         }
1316
1317         dev = rte_cryptodev_pmd_get_dev(dev_id);
1318
1319         if (sess == NULL || xforms == NULL || dev == NULL)
1320                 return -EINVAL;
1321
1322         index = dev->driver_id;
1323
1324         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1325                                 -ENOTSUP);
1326
1327         if (sess->sess_private_data[index] == NULL) {
1328                 ret = dev->dev_ops->asym_session_configure(dev,
1329                                                         xforms,
1330                                                         sess, mp);
1331                 if (ret < 0) {
1332                         CDEV_LOG_ERR(
1333                                 "dev_id %d failed to configure session details",
1334                                 dev_id);
1335                         return ret;
1336                 }
1337         }
1338
1339         return 0;
1340 }
1341
1342 struct rte_mempool *
1343 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1344         uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1345         int socket_id)
1346 {
1347         struct rte_mempool *mp;
1348         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1349         uint32_t obj_sz;
1350
1351         obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1352         if (obj_sz > elt_size)
1353                 CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
1354                                 obj_sz);
1355         else
1356                 obj_sz = elt_size;
1357
1358         mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1359                         (uint32_t)(sizeof(*pool_priv)),
1360                         NULL, NULL, NULL, NULL,
1361                         socket_id, 0);
1362         if (mp == NULL) {
1363                 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
1364                         __func__, name, rte_errno);
1365                 return NULL;
1366         }
1367
1368         pool_priv = rte_mempool_get_priv(mp);
1369         if (!pool_priv) {
1370                 CDEV_LOG_ERR("%s(name=%s) failed to get private data",
1371                         __func__, name);
1372                 rte_mempool_free(mp);
1373                 return NULL;
1374         }
1375
1376         pool_priv->nb_drivers = nb_drivers;
1377         pool_priv->user_data_sz = user_data_size;
1378
1379         return mp;
1380 }
1381
1382 static unsigned int
1383 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1384 {
1385         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1386                         sess->user_data_sz;
1387 }
1388
1389 struct rte_cryptodev_sym_session *
1390 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1391 {
1392         struct rte_cryptodev_sym_session *sess;
1393         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1394
1395         if (!mp) {
1396                 CDEV_LOG_ERR("Invalid mempool");
1397                 return NULL;
1398         }
1399
1400         pool_priv = rte_mempool_get_priv(mp);
1401
1402         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
1403                 CDEV_LOG_ERR("Invalid mempool");
1404                 return NULL;
1405         }
1406
1407         /* Allocate a session structure from the session pool */
1408         if (rte_mempool_get(mp, (void **)&sess)) {
1409                 CDEV_LOG_ERR("couldn't get object from session mempool");
1410                 return NULL;
1411         }
1412
1413         sess->nb_drivers = pool_priv->nb_drivers;
1414         sess->user_data_sz = pool_priv->user_data_sz;
1415         sess->opaque_data = 0;
1416
1417         /* Clear the per-driver session data pointers and the user
1418          * data area that follows them.
1419          */
1420         memset(sess->sess_data, 0,
1421                         rte_cryptodev_sym_session_data_size(sess));
1422
1423         return sess;
1424 }
1425
1426 struct rte_cryptodev_asym_session *
1427 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1428 {
1429         struct rte_cryptodev_asym_session *sess;
1430
1431         /* Allocate a session structure from the session pool */
1432         if (rte_mempool_get(mp, (void **)&sess)) {
1433                 CDEV_LOG_ERR("couldn't get object from session mempool");
1434                 return NULL;
1435         }
1436
1437         /* Clear the per-driver private data pointers and the trailing
1438          * byte that flags the presence of private data.
1439          */
1440         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1441
1442         return sess;
1443 }
1444
1445 int
1446 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1447                 struct rte_cryptodev_sym_session *sess)
1448 {
1449         struct rte_cryptodev *dev;
1450         uint8_t driver_id;
1451
1452         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1453                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1454                 return -EINVAL;
1455         }
1456
1457         dev = rte_cryptodev_pmd_get_dev(dev_id);
1458
1459         if (dev == NULL || sess == NULL)
1460                 return -EINVAL;
1461
1462         driver_id = dev->driver_id;
1463         if (sess->sess_data[driver_id].refcnt == 0)
1464                 return 0;
1465         if (--sess->sess_data[driver_id].refcnt != 0)
1466                 return -EBUSY;
1467
1468         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1469
1470         dev->dev_ops->sym_session_clear(dev, sess);
1471
1472         return 0;
1473 }
1474
1475 int
1476 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1477                 struct rte_cryptodev_asym_session *sess)
1478 {
1479         struct rte_cryptodev *dev;
1480
1481         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1482                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1483                 return -EINVAL;
1484         }
1485
1486         dev = rte_cryptodev_pmd_get_dev(dev_id);
1487
1488         if (dev == NULL || sess == NULL)
1489                 return -EINVAL;
1490
1491         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1492
1493         dev->dev_ops->asym_session_clear(dev, sess);
1494
1495         return 0;
1496 }
1497
1498 int
1499 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1500 {
1501         uint8_t i;
1502         struct rte_mempool *sess_mp;
1503
1504         if (sess == NULL)
1505                 return -EINVAL;
1506
1507         /* Check that all device private data has been freed */
1508         for (i = 0; i < sess->nb_drivers; i++) {
1509                 if (sess->sess_data[i].refcnt != 0)
1510                         return -EBUSY;
1511         }
1512
1513         /* Return session to mempool */
1514         sess_mp = rte_mempool_from_obj(sess);
1515         rte_mempool_put(sess_mp, sess);
1516
1517         return 0;
1518 }
1519
1520 int
1521 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1522 {
1523         uint8_t i;
1524         void *sess_priv;
1525         struct rte_mempool *sess_mp;
1526
1527         if (sess == NULL)
1528                 return -EINVAL;
1529
1530         /* Check that all device private data has been freed */
1531         for (i = 0; i < nb_drivers; i++) {
1532                 sess_priv = get_asym_session_private_data(sess, i);
1533                 if (sess_priv != NULL)
1534                         return -EBUSY;
1535         }
1536
1537         /* Return session to mempool */
1538         sess_mp = rte_mempool_from_obj(sess);
1539         rte_mempool_put(sess_mp, sess);
1540
1541         return 0;
1542 }
1543
1544 unsigned int
1545 rte_cryptodev_sym_get_header_session_size(void)
1546 {
1547         /*
1548          * Header contains pointers to the private data of all registered
1549          * drivers and all the information needed to safely clear
1550          * or free a session.
1551          */
1552         struct rte_cryptodev_sym_session s = {0};
1553
1554         s.nb_drivers = nb_drivers;
1555
1556         return (unsigned int)(sizeof(s) +
1557                         rte_cryptodev_sym_session_data_size(&s));
1558 }
1559
1560 unsigned int
1561 rte_cryptodev_sym_get_existing_header_session_size(
1562                 struct rte_cryptodev_sym_session *sess)
1563 {
1564         if (!sess)
1565                 return 0;
1566         else
1567                 return (unsigned int)(sizeof(*sess) +
1568                                 rte_cryptodev_sym_session_data_size(sess));
1569 }
1570
1571 unsigned int
1572 rte_cryptodev_asym_get_header_session_size(void)
1573 {
1574         /*
1575          * Header contains pointers to the private data
1576          * of all registered drivers, and a flag which
1577          * indicates presence of private data
1578          */
1579         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1580 }
1581
1582 unsigned int
1583 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1584 {
1585         struct rte_cryptodev *dev;
1586         unsigned int priv_sess_size;
1587
1588         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1589                 return 0;
1590
1591         dev = rte_cryptodev_pmd_get_dev(dev_id);
1592
1593         if (*dev->dev_ops->sym_session_get_size == NULL)
1594                 return 0;
1595
1596         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1597
1598         return priv_sess_size;
1599 }
1600
1601 unsigned int
1602 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1603 {
1604         struct rte_cryptodev *dev;
1605         unsigned int header_size = sizeof(void *) * nb_drivers;
1606         unsigned int priv_sess_size;
1607
1608         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1609                 return 0;
1610
1611         dev = rte_cryptodev_pmd_get_dev(dev_id);
1612
1613         if (*dev->dev_ops->asym_session_get_size == NULL)
1614                 return 0;
1615
1616         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1617         if (priv_sess_size < header_size)
1618                 return header_size;
1619
1620         return priv_sess_size;
1621
1622 }
1623
1624 int
1625 rte_cryptodev_sym_session_set_user_data(
1626                                         struct rte_cryptodev_sym_session *sess,
1627                                         void *data,
1628                                         uint16_t size)
1629 {
1630         if (sess == NULL)
1631                 return -EINVAL;
1632
1633         if (sess->user_data_sz < size)
1634                 return -ENOMEM;
1635
1636         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1637         return 0;
1638 }
1639
1640 void *
1641 rte_cryptodev_sym_session_get_user_data(
1642                                         struct rte_cryptodev_sym_session *sess)
1643 {
1644         if (sess == NULL || sess->user_data_sz == 0)
1645                 return NULL;
1646
1647         return (void *)(sess->sess_data + sess->nb_drivers);
1648 }
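
/*
 * Usage sketch (illustrative): storing and retrieving application context in
 * a session.  This only works when the session header pool was created with
 * a non-zero user-data size; struct app_ctx and the sess variable are
 * assumptions made for the example.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) != 0)
 *		printf("user data area too small or absent\n");
 *
 *	struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
 */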
1649
1650 static inline void
1651 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
1652 {
1653         uint32_t i;
1654         for (i = 0; i < vec->num; i++)
1655                 vec->status[i] = errnum;
1656 }
1657
1658 uint32_t
1659 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1660         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1661         struct rte_crypto_sym_vec *vec)
1662 {
1663         struct rte_cryptodev *dev;
1664
1665         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1666                 sym_crypto_fill_status(vec, EINVAL);
1667                 return 0;
1668         }
1669
1670         dev = rte_cryptodev_pmd_get_dev(dev_id);
1671
1672         if (*dev->dev_ops->sym_cpu_process == NULL ||
1673                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
1674                 sym_crypto_fill_status(vec, ENOTSUP);
1675                 return 0;
1676         }
1677
1678         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1679 }
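
/*
 * Usage sketch (illustrative): applications are expected to check the
 * RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO feature flag before taking the synchronous
 * CPU path, since the function above only fills each status with ENOTSUP
 * otherwise.  The helper name below is an assumption; building the
 * rte_crypto_sym_vec itself is left out as its layout is release-specific.
 *
 *	static int
 *	cpu_crypto_supported(uint8_t dev_id)
 *	{
 *		struct rte_cryptodev_info info;
 *
 *		rte_cryptodev_info_get(dev_id, &info);
 *		return (info.feature_flags &
 *				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO) != 0;
 *	}
 */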
1680
1681 /** Initialise rte_crypto_op mempool element */
1682 static void
1683 rte_crypto_op_init(struct rte_mempool *mempool,
1684                 void *opaque_arg,
1685                 void *_op_data,
1686                 __rte_unused unsigned i)
1687 {
1688         struct rte_crypto_op *op = _op_data;
1689         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1690
1691         memset(_op_data, 0, mempool->elt_size);
1692
1693         __rte_crypto_op_reset(op, type);
1694
1695         op->phys_addr = rte_mem_virt2iova(_op_data);
1696         op->mempool = mempool;
1697 }
1698
1699
1700 struct rte_mempool *
1701 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1702                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1703                 int socket_id)
1704 {
1705         struct rte_crypto_op_pool_private *priv;
1706
1707         unsigned elt_size = sizeof(struct rte_crypto_op) +
1708                         priv_size;
1709
1710         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1711                 elt_size += sizeof(struct rte_crypto_sym_op);
1712         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1713                 elt_size += sizeof(struct rte_crypto_asym_op);
1714         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1715                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1716                                     sizeof(struct rte_crypto_asym_op));
1717         } else {
1718                 CDEV_LOG_ERR("Invalid op_type");
1719                 return NULL;
1720         }
1721
1722         /* lookup mempool in case already allocated */
1723         struct rte_mempool *mp = rte_mempool_lookup(name);
1724
1725         if (mp != NULL) {
1726                 priv = (struct rte_crypto_op_pool_private *)
1727                                 rte_mempool_get_priv(mp);
1728
1729                 if (mp->elt_size != elt_size ||
1730                                 mp->cache_size < cache_size ||
1731                                 mp->size < nb_elts ||
1732                                 priv->priv_size < priv_size) {
1733                         mp = NULL;
1734                         CDEV_LOG_ERR("Mempool %s already exists but with "
1735                                         "incompatible parameters", name);
1736                         return NULL;
1737                 }
1738                 return mp;
1739         }
1740
1741         mp = rte_mempool_create(
1742                         name,
1743                         nb_elts,
1744                         elt_size,
1745                         cache_size,
1746                         sizeof(struct rte_crypto_op_pool_private),
1747                         NULL,
1748                         NULL,
1749                         rte_crypto_op_init,
1750                         &type,
1751                         socket_id,
1752                         0);
1753
1754         if (mp == NULL) {
1755                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1756                 return NULL;
1757         }
1758
1759         priv = (struct rte_crypto_op_pool_private *)
1760                         rte_mempool_get_priv(mp);
1761
1762         priv->priv_size = priv_size;
1763         priv->type = type;
1764
1765         return mp;
1766 }
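
/*
 * Usage sketch (illustrative): creating a symmetric op pool and allocating a
 * single operation from it.  Pool name, sizes and the 16-byte per-op private
 * area are arbitrary choices for the example.
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("sym_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 16,
 *			rte_socket_id());
 *
 *	struct rte_crypto_op *op =
 *		rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *
 *	if (op != NULL)
 *		rte_crypto_op_free(op);
 */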
1767
1768 int
1769 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1770 {
1771         struct rte_cryptodev *dev = NULL;
1772         uint32_t i = 0;
1773
1774         if (name == NULL)
1775                 return -EINVAL;
1776
1777         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1778                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1779                                 "%s_%u", dev_name_prefix, i);
1780
1781                 if (ret < 0)
1782                         return ret;
1783
1784                 dev = rte_cryptodev_pmd_get_named_dev(name);
1785                 if (!dev)
1786                         return 0;
1787         }
1788
1789         return -1;
1790 }
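
/*
 * Usage sketch (illustrative): a PMD picking a unique device name during
 * probe when the user did not supply one.  The "crypto_sample" prefix is a
 * placeholder.
 *
 *	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 *
 *	if (rte_cryptodev_pmd_create_dev_name(name, "crypto_sample") < 0)
 *		return -EINVAL;
 */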
1791
1792 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
1793
1794 static struct cryptodev_driver_list cryptodev_driver_list =
1795         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1796
1797 int
1798 rte_cryptodev_driver_id_get(const char *name)
1799 {
1800         struct cryptodev_driver *driver;
1801         const char *driver_name;
1802
1803         if (name == NULL) {
1804                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
1805                 return -1;
1806         }
1807
1808         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1809                 driver_name = driver->driver->name;
1810                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
1811                         return driver->id;
1812         }
1813         return -1;
1814 }
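
/*
 * Usage sketch (illustrative): mapping a driver name to its numeric id and
 * counting the devices bound to it.  "crypto_aesni_mb" is just an example
 * driver name.
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *	if (drv_id >= 0)
 *		printf("%u device(s) use this driver\n",
 *			rte_cryptodev_device_count_by_driver(
 *					(uint8_t)drv_id));
 */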
1815
1816 const char *
1817 rte_cryptodev_name_get(uint8_t dev_id)
1818 {
1819         struct rte_cryptodev *dev;
1820
1821         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1822                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1823                 return NULL;
1824         }
1825
1826         dev = rte_cryptodev_pmd_get_dev(dev_id);
1827         if (dev == NULL)
1828                 return NULL;
1829
1830         return dev->data->name;
1831 }
1832
1833 const char *
1834 rte_cryptodev_driver_name_get(uint8_t driver_id)
1835 {
1836         struct cryptodev_driver *driver;
1837
1838         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1839                 if (driver->id == driver_id)
1840                         return driver->driver->name;
1841         return NULL;
1842 }
1843
1844 uint8_t
1845 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1846                 const struct rte_driver *drv)
1847 {
1848         crypto_drv->driver = drv;
1849         crypto_drv->id = nb_drivers;
1850
1851         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1852
1853         return nb_drivers++;
1854 }
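
/*
 * Usage sketch (illustrative): PMDs normally do not call
 * rte_cryptodev_allocate_driver() directly; they register through the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() macro, which invokes it from a
 * constructor at start-up and stores the returned id.  All names below are
 * placeholders.
 *
 *	static struct rte_driver sample_pmd_drv;
 *	static struct cryptodev_driver sample_crypto_drv;
 *	static uint8_t sample_driver_id;
 *
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(sample_crypto_drv, sample_pmd_drv,
 *			sample_driver_id);
 */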