cryptodev: allocate max space for internal queue array
[dpdk.git] / lib / cryptodev / rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_dev.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_tailq.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_common.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_errno.h>
36 #include <rte_spinlock.h>
37 #include <rte_string_fns.h>
38
39 #include "rte_crypto.h"
40 #include "rte_cryptodev.h"
41 #include "cryptodev_pmd.h"
42 #include "rte_cryptodev_trace.h"
43
/* Number of registered crypto drivers; maintained elsewhere in this file
 * (driver registration code not visible in this chunk). */
static uint8_t nb_drivers;

/* Flat table of all crypto devices, indexed by device id. */
static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

/* Public alias of the device table (part of the exported ABI). */
struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

/* Global bookkeeping: device table, per-device shared data, device count. */
static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};
72
/**
 * The crypto cipher algorithm strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_cipher_algorithm; index 0 is intentionally
 * unset (see the enum-lookup helpers below, which search from index 1).
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};
111
/**
 * The crypto auth algorithm strings identifiers.
 * It could be used in application command line.
 *
 * Indexed by enum rte_crypto_auth_algorithm; index 0 is intentionally
 * unset (lookup helpers search from index 1).
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};
165
/**
 * Asymmetric crypto transform operation strings identifiers.
 * Indexed by enum rte_crypto_asym_xform_type.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 * Indexed by enum rte_crypto_asym_op_type.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};
202
203 int
204 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
205                 const char *algo_string)
206 {
207         unsigned int i;
208
209         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
210                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
211                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
212                         return 0;
213                 }
214         }
215
216         /* Invalid string */
217         return -1;
218 }
219
220 int
221 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
222                 const char *algo_string)
223 {
224         unsigned int i;
225
226         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
227                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
228                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
229                         return 0;
230                 }
231         }
232
233         /* Invalid string */
234         return -1;
235 }
236
237 int
238 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
239                 const char *algo_string)
240 {
241         unsigned int i;
242
243         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
244                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
245                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
246                         return 0;
247                 }
248         }
249
250         /* Invalid string */
251         return -1;
252 }
253
254 int
255 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
256                 const char *xform_string)
257 {
258         unsigned int i;
259
260         for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
261                 if (strcmp(xform_string,
262                         rte_crypto_asym_xform_strings[i]) == 0) {
263                         *xform_enum = (enum rte_crypto_asym_xform_type) i;
264                         return 0;
265                 }
266         }
267
268         /* Invalid string */
269         return -1;
270 }
271
/**
 * The crypto auth operation strings identifiers.
 * It could be used in application command line.
 * Indexed by enum rte_crypto_auth_operation.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
281
282 const struct rte_cryptodev_symmetric_capability *
283 rte_cryptodev_sym_capability_get(uint8_t dev_id,
284                 const struct rte_cryptodev_sym_capability_idx *idx)
285 {
286         const struct rte_cryptodev_capabilities *capability;
287         struct rte_cryptodev_info dev_info;
288         int i = 0;
289
290         rte_cryptodev_info_get(dev_id, &dev_info);
291
292         while ((capability = &dev_info.capabilities[i++])->op !=
293                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
294                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
295                         continue;
296
297                 if (capability->sym.xform_type != idx->type)
298                         continue;
299
300                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
301                         capability->sym.auth.algo == idx->algo.auth)
302                         return &capability->sym;
303
304                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
305                         capability->sym.cipher.algo == idx->algo.cipher)
306                         return &capability->sym;
307
308                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
309                                 capability->sym.aead.algo == idx->algo.aead)
310                         return &capability->sym;
311         }
312
313         return NULL;
314 }
315
316 static int
317 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
318 {
319         unsigned int next_size;
320
321         /* Check lower/upper bounds */
322         if (size < range->min)
323                 return -1;
324
325         if (size > range->max)
326                 return -1;
327
328         /* If range is actually only one value, size is correct */
329         if (range->increment == 0)
330                 return 0;
331
332         /* Check if value is one of the supported sizes */
333         for (next_size = range->min; next_size <= range->max;
334                         next_size += range->increment)
335                 if (size == next_size)
336                         return 0;
337
338         return -1;
339 }
340
341 const struct rte_cryptodev_asymmetric_xform_capability *
342 rte_cryptodev_asym_capability_get(uint8_t dev_id,
343                 const struct rte_cryptodev_asym_capability_idx *idx)
344 {
345         const struct rte_cryptodev_capabilities *capability;
346         struct rte_cryptodev_info dev_info;
347         unsigned int i = 0;
348
349         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
350         rte_cryptodev_info_get(dev_id, &dev_info);
351
352         while ((capability = &dev_info.capabilities[i++])->op !=
353                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
354                 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
355                         continue;
356
357                 if (capability->asym.xform_capa.xform_type == idx->type)
358                         return &capability->asym.xform_capa;
359         }
360         return NULL;
361 };
362
363 int
364 rte_cryptodev_sym_capability_check_cipher(
365                 const struct rte_cryptodev_symmetric_capability *capability,
366                 uint16_t key_size, uint16_t iv_size)
367 {
368         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
369                 return -1;
370
371         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
372                 return -1;
373
374         return 0;
375 }
376
377 int
378 rte_cryptodev_sym_capability_check_auth(
379                 const struct rte_cryptodev_symmetric_capability *capability,
380                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
381 {
382         if (param_range_check(key_size, &capability->auth.key_size) != 0)
383                 return -1;
384
385         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
386                 return -1;
387
388         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
389                 return -1;
390
391         return 0;
392 }
393
394 int
395 rte_cryptodev_sym_capability_check_aead(
396                 const struct rte_cryptodev_symmetric_capability *capability,
397                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
398                 uint16_t iv_size)
399 {
400         if (param_range_check(key_size, &capability->aead.key_size) != 0)
401                 return -1;
402
403         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
404                 return -1;
405
406         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
407                 return -1;
408
409         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
410                 return -1;
411
412         return 0;
413 }
414 int
415 rte_cryptodev_asym_xform_capability_check_optype(
416         const struct rte_cryptodev_asymmetric_xform_capability *capability,
417         enum rte_crypto_asym_op_type op_type)
418 {
419         if (capability->op_types & (1 << op_type))
420                 return 1;
421
422         return 0;
423 }
424
425 int
426 rte_cryptodev_asym_xform_capability_check_modlen(
427         const struct rte_cryptodev_asymmetric_xform_capability *capability,
428         uint16_t modlen)
429 {
430         /* no need to check for limits, if min or max = 0 */
431         if (capability->modlen.min != 0) {
432                 if (modlen < capability->modlen.min)
433                         return -1;
434         }
435
436         if (capability->modlen.max != 0) {
437                 if (modlen > capability->modlen.max)
438                         return -1;
439         }
440
441         /* in any case, check if given modlen is module increment */
442         if (capability->modlen.increment != 0) {
443                 if (modlen % (capability->modlen.increment))
444                         return -1;
445         }
446
447         return 0;
448 }
449
/* Spinlock for crypto device enq callbacks; presumably serialises setup
 * and teardown of dev->enq_cbs/deq_cbs — usage not visible in this chunk,
 * confirm against the callback add/remove functions. */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
452
453 static void
454 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
455 {
456         struct rte_cryptodev_cb_rcu *list;
457         struct rte_cryptodev_cb *cb, *next;
458         uint16_t qp_id;
459
460         if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
461                 return;
462
463         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
464                 list = &dev->enq_cbs[qp_id];
465                 cb = list->next;
466                 while (cb != NULL) {
467                         next = cb->next;
468                         rte_free(cb);
469                         cb = next;
470                 }
471
472                 rte_free(list->qsbr);
473         }
474
475         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
476                 list = &dev->deq_cbs[qp_id];
477                 cb = list->next;
478                 while (cb != NULL) {
479                         next = cb->next;
480                         rte_free(cb);
481                         cb = next;
482                 }
483
484                 rte_free(list->qsbr);
485         }
486
487         rte_free(dev->enq_cbs);
488         dev->enq_cbs = NULL;
489         rte_free(dev->deq_cbs);
490         dev->deq_cbs = NULL;
491 }
492
493 static int
494 cryptodev_cb_init(struct rte_cryptodev *dev)
495 {
496         struct rte_cryptodev_cb_rcu *list;
497         struct rte_rcu_qsbr *qsbr;
498         uint16_t qp_id;
499         size_t size;
500
501         /* Max thread set to 1, as one DP thread accessing a queue-pair */
502         const uint32_t max_threads = 1;
503
504         dev->enq_cbs = rte_zmalloc(NULL,
505                                    sizeof(struct rte_cryptodev_cb_rcu) *
506                                    dev->data->nb_queue_pairs, 0);
507         if (dev->enq_cbs == NULL) {
508                 CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
509                 return -ENOMEM;
510         }
511
512         dev->deq_cbs = rte_zmalloc(NULL,
513                                    sizeof(struct rte_cryptodev_cb_rcu) *
514                                    dev->data->nb_queue_pairs, 0);
515         if (dev->deq_cbs == NULL) {
516                 CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
517                 rte_free(dev->enq_cbs);
518                 return -ENOMEM;
519         }
520
521         /* Create RCU QSBR variable */
522         size = rte_rcu_qsbr_get_memsize(max_threads);
523
524         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
525                 list = &dev->enq_cbs[qp_id];
526                 qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
527                 if (qsbr == NULL) {
528                         CDEV_LOG_ERR("Failed to allocate memory for RCU on "
529                                 "queue_pair_id=%d", qp_id);
530                         goto cb_init_err;
531                 }
532
533                 if (rte_rcu_qsbr_init(qsbr, max_threads)) {
534                         CDEV_LOG_ERR("Failed to initialize for RCU on "
535                                 "queue_pair_id=%d", qp_id);
536                         goto cb_init_err;
537                 }
538
539                 list->qsbr = qsbr;
540         }
541
542         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
543                 list = &dev->deq_cbs[qp_id];
544                 qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
545                 if (qsbr == NULL) {
546                         CDEV_LOG_ERR("Failed to allocate memory for RCU on "
547                                 "queue_pair_id=%d", qp_id);
548                         goto cb_init_err;
549                 }
550
551                 if (rte_rcu_qsbr_init(qsbr, max_threads)) {
552                         CDEV_LOG_ERR("Failed to initialize for RCU on "
553                                 "queue_pair_id=%d", qp_id);
554                         goto cb_init_err;
555                 }
556
557                 list->qsbr = qsbr;
558         }
559
560         return 0;
561
562 cb_init_err:
563         cryptodev_cb_cleanup(dev);
564         return -ENOMEM;
565 }
566
567 const char *
568 rte_cryptodev_get_feature_name(uint64_t flag)
569 {
570         switch (flag) {
571         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
572                 return "SYMMETRIC_CRYPTO";
573         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
574                 return "ASYMMETRIC_CRYPTO";
575         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
576                 return "SYM_OPERATION_CHAINING";
577         case RTE_CRYPTODEV_FF_CPU_SSE:
578                 return "CPU_SSE";
579         case RTE_CRYPTODEV_FF_CPU_AVX:
580                 return "CPU_AVX";
581         case RTE_CRYPTODEV_FF_CPU_AVX2:
582                 return "CPU_AVX2";
583         case RTE_CRYPTODEV_FF_CPU_AVX512:
584                 return "CPU_AVX512";
585         case RTE_CRYPTODEV_FF_CPU_AESNI:
586                 return "CPU_AESNI";
587         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
588                 return "HW_ACCELERATED";
589         case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
590                 return "IN_PLACE_SGL";
591         case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
592                 return "OOP_SGL_IN_SGL_OUT";
593         case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
594                 return "OOP_SGL_IN_LB_OUT";
595         case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
596                 return "OOP_LB_IN_SGL_OUT";
597         case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
598                 return "OOP_LB_IN_LB_OUT";
599         case RTE_CRYPTODEV_FF_CPU_NEON:
600                 return "CPU_NEON";
601         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
602                 return "CPU_ARM_CE";
603         case RTE_CRYPTODEV_FF_SECURITY:
604                 return "SECURITY_PROTOCOL";
605         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
606                 return "RSA_PRIV_OP_KEY_EXP";
607         case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
608                 return "RSA_PRIV_OP_KEY_QT";
609         case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
610                 return "DIGEST_ENCRYPTED";
611         case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
612                 return "SYM_CPU_CRYPTO";
613         case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
614                 return "ASYM_SESSIONLESS";
615         case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
616                 return "SYM_SESSIONLESS";
617         case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
618                 return "NON_BYTE_ALIGNED_DATA";
619         case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
620                 return "CIPHER_MULTIPLE_DATA_UNITS";
621         case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
622                 return "CIPHER_WRAPPED_KEY";
623         default:
624                 return NULL;
625         }
626 }
627
628 struct rte_cryptodev *
629 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
630 {
631         return &cryptodev_globals.devs[dev_id];
632 }
633
634 struct rte_cryptodev *
635 rte_cryptodev_pmd_get_named_dev(const char *name)
636 {
637         struct rte_cryptodev *dev;
638         unsigned int i;
639
640         if (name == NULL)
641                 return NULL;
642
643         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
644                 dev = &cryptodev_globals.devs[i];
645
646                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
647                                 (strcmp(dev->data->name, name) == 0))
648                         return dev;
649         }
650
651         return NULL;
652 }
653
654 static inline uint8_t
655 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
656 {
657         if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
658                         rte_crypto_devices[dev_id].data == NULL)
659                 return 0;
660
661         return 1;
662 }
663
664 unsigned int
665 rte_cryptodev_is_valid_dev(uint8_t dev_id)
666 {
667         struct rte_cryptodev *dev = NULL;
668
669         if (!rte_cryptodev_is_valid_device_data(dev_id))
670                 return 0;
671
672         dev = rte_cryptodev_pmd_get_dev(dev_id);
673         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
674                 return 0;
675         else
676                 return 1;
677 }
678
679
680 int
681 rte_cryptodev_get_dev_id(const char *name)
682 {
683         unsigned i;
684
685         if (name == NULL)
686                 return -1;
687
688         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
689                 if (!rte_cryptodev_is_valid_device_data(i))
690                         continue;
691                 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
692                                 == 0) &&
693                                 (cryptodev_globals.devs[i].attached ==
694                                                 RTE_CRYPTODEV_ATTACHED))
695                         return i;
696         }
697
698         return -1;
699 }
700
701 uint8_t
702 rte_cryptodev_count(void)
703 {
704         return cryptodev_globals.nb_devs;
705 }
706
707 uint8_t
708 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
709 {
710         uint8_t i, dev_count = 0;
711
712         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
713                 if (cryptodev_globals.devs[i].driver_id == driver_id &&
714                         cryptodev_globals.devs[i].attached ==
715                                         RTE_CRYPTODEV_ATTACHED)
716                         dev_count++;
717
718         return dev_count;
719 }
720
721 uint8_t
722 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
723         uint8_t nb_devices)
724 {
725         uint8_t i, count = 0;
726         struct rte_cryptodev *devs = cryptodev_globals.devs;
727
728         for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
729                 if (!rte_cryptodev_is_valid_device_data(i))
730                         continue;
731
732                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
733                         int cmp;
734
735                         cmp = strncmp(devs[i].device->driver->name,
736                                         driver_name,
737                                         strlen(driver_name) + 1);
738
739                         if (cmp == 0)
740                                 devices[count++] = devs[i].data->dev_id;
741                 }
742         }
743
744         return count;
745 }
746
747 void *
748 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
749 {
750         if (dev_id < RTE_CRYPTO_MAX_DEVS &&
751                         (rte_crypto_devices[dev_id].feature_flags &
752                         RTE_CRYPTODEV_FF_SECURITY))
753                 return rte_crypto_devices[dev_id].security_ctx;
754
755         return NULL;
756 }
757
758 int
759 rte_cryptodev_socket_id(uint8_t dev_id)
760 {
761         struct rte_cryptodev *dev;
762
763         if (!rte_cryptodev_is_valid_dev(dev_id))
764                 return -1;
765
766         dev = rte_cryptodev_pmd_get_dev(dev_id);
767
768         return dev->data->socket_id;
769 }
770
771 static inline int
772 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
773                 int socket_id)
774 {
775         char mz_name[RTE_MEMZONE_NAMESIZE];
776         const struct rte_memzone *mz;
777         int n;
778
779         /* generate memzone name */
780         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
781         if (n >= (int)sizeof(mz_name))
782                 return -EINVAL;
783
784         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
785                 mz = rte_memzone_reserve(mz_name,
786                                 sizeof(struct rte_cryptodev_data),
787                                 socket_id, 0);
788                 CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
789                                 mz_name, mz);
790         } else {
791                 mz = rte_memzone_lookup(mz_name);
792                 CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
793                                 mz_name, mz);
794         }
795
796         if (mz == NULL)
797                 return -ENOMEM;
798
799         *data = mz->addr;
800         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
801                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
802
803         return 0;
804 }
805
806 static inline int
807 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
808 {
809         char mz_name[RTE_MEMZONE_NAMESIZE];
810         const struct rte_memzone *mz;
811         int n;
812
813         /* generate memzone name */
814         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
815         if (n >= (int)sizeof(mz_name))
816                 return -EINVAL;
817
818         mz = rte_memzone_lookup(mz_name);
819         if (mz == NULL)
820                 return -ENOMEM;
821
822         RTE_ASSERT(*data == mz->addr);
823         *data = NULL;
824
825         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
826                 CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
827                                 mz_name, mz);
828                 return rte_memzone_free(mz);
829         } else {
830                 CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
831                                 mz_name, mz);
832         }
833
834         return 0;
835 }
836
837 static uint8_t
838 rte_cryptodev_find_free_device_index(void)
839 {
840         uint8_t dev_id;
841
842         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
843                 if (rte_crypto_devices[dev_id].attached ==
844                                 RTE_CRYPTODEV_DETACHED)
845                         return dev_id;
846         }
847         return RTE_CRYPTO_MAX_DEVS;
848 }
849
/*
 * Allocate a device slot named @name and attach its shared data memzone.
 * Returns the device, or NULL when the name is taken, the table is full,
 * or the shared data cannot be reserved/looked up.
 */
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	/* Device names must be unique across the table. */
	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		/* Reserve (primary) or look up (secondary) the shared
		 * memzone backing this device's data. */
		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		/* Only the primary process initialises the shared data;
		 * secondary processes just attach to it. */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
908
909 int
910 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
911 {
912         int ret;
913         uint8_t dev_id;
914
915         if (cryptodev == NULL)
916                 return -EINVAL;
917
918         dev_id = cryptodev->data->dev_id;
919
920         /* Close device only if device operations have been set */
921         if (cryptodev->dev_ops) {
922                 ret = rte_cryptodev_close(dev_id);
923                 if (ret < 0)
924                         return ret;
925         }
926
927         ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
928         if (ret < 0)
929                 return ret;
930
931         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
932         cryptodev_globals.nb_devs--;
933         return 0;
934 }
935
936 uint16_t
937 rte_cryptodev_queue_pair_count(uint8_t dev_id)
938 {
939         struct rte_cryptodev *dev;
940
941         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
942                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
943                 return 0;
944         }
945
946         dev = &rte_crypto_devices[dev_id];
947         return dev->data->nb_queue_pairs;
948 }
949
950 static int
951 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
952                 int socket_id)
953 {
954         struct rte_cryptodev_info dev_info;
955         void **qp;
956         unsigned i;
957
958         if ((dev == NULL) || (nb_qpairs < 1)) {
959                 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
960                                                         dev, nb_qpairs);
961                 return -EINVAL;
962         }
963
964         CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
965                         nb_qpairs, dev->data->dev_id);
966
967         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
968
969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
970         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
971
972         if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
973                 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
974                                 nb_qpairs, dev->data->dev_id);
975             return -EINVAL;
976         }
977
978         if (dev->data->queue_pairs == NULL) { /* first time configuration */
979                 dev->data->queue_pairs = rte_zmalloc_socket(
980                                 "cryptodev->queue_pairs",
981                                 sizeof(dev->data->queue_pairs[0]) *
982                                 dev_info.max_nb_queue_pairs,
983                                 RTE_CACHE_LINE_SIZE, socket_id);
984
985                 if (dev->data->queue_pairs == NULL) {
986                         dev->data->nb_queue_pairs = 0;
987                         CDEV_LOG_ERR("failed to get memory for qp meta data, "
988                                                         "nb_queues %u",
989                                                         nb_qpairs);
990                         return -(ENOMEM);
991                 }
992         } else { /* re-configure */
993                 int ret;
994                 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
995
996                 qp = dev->data->queue_pairs;
997
998                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
999                                 -ENOTSUP);
1000
1001                 for (i = nb_qpairs; i < old_nb_queues; i++) {
1002                         ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1003                         if (ret < 0)
1004                                 return ret;
1005                         qp[i] = NULL;
1006                 }
1007
1008         }
1009         dev->data->nb_queue_pairs = nb_qpairs;
1010         return 0;
1011 }
1012
/*
 * Configure a stopped crypto device.
 *
 * Tears down the enq/deq callback lists built for the previous queue
 * layout, (re)sizes the queue-pair array, rebuilds the callback lists
 * for the new layout and finally hands the config to the PMD.
 * Returns 0 on success or a negative errno-style value.
 */
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Reconfiguration is only allowed while the device is stopped. */
	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Drop callbacks registered for the old queue-pair layout;
	 * they are recreated below once the new layout is known.
	 */
	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
1058
1059 int
1060 rte_cryptodev_start(uint8_t dev_id)
1061 {
1062         struct rte_cryptodev *dev;
1063         int diag;
1064
1065         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1066
1067         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1068                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1069                 return -EINVAL;
1070         }
1071
1072         dev = &rte_crypto_devices[dev_id];
1073
1074         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1075
1076         if (dev->data->dev_started != 0) {
1077                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1078                         dev_id);
1079                 return 0;
1080         }
1081
1082         diag = (*dev->dev_ops->dev_start)(dev);
1083         rte_cryptodev_trace_start(dev_id, diag);
1084         if (diag == 0)
1085                 dev->data->dev_started = 1;
1086         else
1087                 return diag;
1088
1089         return 0;
1090 }
1091
1092 void
1093 rte_cryptodev_stop(uint8_t dev_id)
1094 {
1095         struct rte_cryptodev *dev;
1096
1097         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1098                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1099                 return;
1100         }
1101
1102         dev = &rte_crypto_devices[dev_id];
1103
1104         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1105
1106         if (dev->data->dev_started == 0) {
1107                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1108                         dev_id);
1109                 return;
1110         }
1111
1112         (*dev->dev_ops->dev_stop)(dev);
1113         rte_cryptodev_trace_stop(dev_id);
1114         dev->data->dev_started = 0;
1115 }
1116
1117 int
1118 rte_cryptodev_close(uint8_t dev_id)
1119 {
1120         struct rte_cryptodev *dev;
1121         int retval;
1122
1123         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1124                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1125                 return -1;
1126         }
1127
1128         dev = &rte_crypto_devices[dev_id];
1129
1130         /* Device must be stopped before it can be closed */
1131         if (dev->data->dev_started == 1) {
1132                 CDEV_LOG_ERR("Device %u must be stopped before closing",
1133                                 dev_id);
1134                 return -EBUSY;
1135         }
1136
1137         /* We can't close the device if there are outstanding sessions in use */
1138         if (dev->data->session_pool != NULL) {
1139                 if (!rte_mempool_full(dev->data->session_pool)) {
1140                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1141                                         "has sessions still in use, free "
1142                                         "all sessions before calling close",
1143                                         (unsigned)dev_id);
1144                         return -EBUSY;
1145                 }
1146         }
1147
1148         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1149         retval = (*dev->dev_ops->dev_close)(dev);
1150         rte_cryptodev_trace_close(dev_id, retval);
1151
1152         if (retval < 0)
1153                 return retval;
1154
1155         return 0;
1156 }
1157
1158 int
1159 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1160 {
1161         struct rte_cryptodev *dev;
1162
1163         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1164                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1165                 return -EINVAL;
1166         }
1167
1168         dev = &rte_crypto_devices[dev_id];
1169         if (queue_pair_id >= dev->data->nb_queue_pairs) {
1170                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1171                 return -EINVAL;
1172         }
1173         void **qps = dev->data->queue_pairs;
1174
1175         if (qps[queue_pair_id]) {
1176                 CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1177                         queue_pair_id, dev_id);
1178                 return 1;
1179         }
1180
1181         CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1182                 queue_pair_id, dev_id);
1183
1184         return 0;
1185 }
1186
/*
 * Set up one queue pair on a stopped device.
 *
 * Validates dev_id/queue_pair_id, the qp_conf pointer, and — when a
 * session mempool is supplied — that the mempool was created by
 * rte_cryptodev_sym_session_pool_create() (carries the private data)
 * and that its element sizes can hold both the session header and
 * this driver's private session data. Delegates the actual setup to
 * the PMD's queue_pair_setup op.
 */
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)

{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	/* Session and private-session mempools must be given together. */
	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		/* The pool must carry the private data written by
		 * rte_cryptodev_sym_session_pool_create().
		 */
		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		/* Build a throwaway session header to compute the size a
		 * session from this pool would occupy.
		 */
		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		/* Reject pools whose elements cannot hold the session
		 * header, lack a slot for this driver, or whose private
		 * elements are smaller than this driver's session data.
		 */
		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	/* Queue pairs may only be (re)created while the device is stopped. */
	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
1253
/*
 * Register a user callback to run on each enqueue burst of a queue pair.
 *
 * Callbacks are appended in FIFO order under the callback spinlock.
 * The new node is published to the lock-free reader side with a release
 * store so its fn/arg fields are visible before the node itself.
 * Returns the callback handle, or NULL with rte_errno set.
 */
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	/* Serialise against concurrent add/remove on the control plane. */
	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}
1318
/*
 * Remove a previously registered enqueue callback from a queue pair.
 *
 * The node is unlinked under the callback spinlock, then freed only
 * after an RCU QSBR synchronisation so data-plane threads that may
 * still be executing it have drained. Returns 0 on success, -EINVAL
 * when cb is NULL or not found on the list, -ENODEV for a bad
 * device or queue-pair id.
 */
int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	/* Stays -EINVAL unless cb is found and unlinked below. */
	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	/* Walk the singly linked list and unlink cb when found. The
	 * relaxed store is enough: readers tolerate seeing either the
	 * old or the new next pointer.
	 */
	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}
1388
1389 struct rte_cryptodev_cb *
1390 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1391                                uint16_t qp_id,
1392                                rte_cryptodev_callback_fn cb_fn,
1393                                void *cb_arg)
1394 {
1395         struct rte_cryptodev *dev;
1396         struct rte_cryptodev_cb_rcu *list;
1397         struct rte_cryptodev_cb *cb, *tail;
1398
1399         if (!cb_fn) {
1400                 CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1401                 rte_errno = EINVAL;
1402                 return NULL;
1403         }
1404
1405         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1406                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1407                 rte_errno = ENODEV;
1408                 return NULL;
1409         }
1410
1411         dev = &rte_crypto_devices[dev_id];
1412         if (qp_id >= dev->data->nb_queue_pairs) {
1413                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1414                 rte_errno = ENODEV;
1415                 return NULL;
1416         }
1417
1418         cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1419         if (cb == NULL) {
1420                 CDEV_LOG_ERR("Failed to allocate memory for callback on "
1421                              "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1422                 rte_errno = ENOMEM;
1423                 return NULL;
1424         }
1425
1426         rte_spinlock_lock(&rte_cryptodev_callback_lock);
1427
1428         cb->fn = cb_fn;
1429         cb->arg = cb_arg;
1430
1431         /* Add the callbacks in fifo order. */
1432         list = &dev->deq_cbs[qp_id];
1433         tail = list->next;
1434
1435         if (tail) {
1436                 while (tail->next)
1437                         tail = tail->next;
1438                 /* Stores to cb->fn and cb->param should complete before
1439                  * cb is visible to data plane.
1440                  */
1441                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1442         } else {
1443                 /* Stores to cb->fn and cb->param should complete before
1444                  * cb is visible to data plane.
1445                  */
1446                 __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1447         }
1448
1449         rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1450
1451         return cb;
1452 }
1453
1454 int
1455 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1456                                   uint16_t qp_id,
1457                                   struct rte_cryptodev_cb *cb)
1458 {
1459         struct rte_cryptodev *dev;
1460         struct rte_cryptodev_cb **prev_cb, *curr_cb;
1461         struct rte_cryptodev_cb_rcu *list;
1462         int ret;
1463
1464         ret = -EINVAL;
1465
1466         if (!cb) {
1467                 CDEV_LOG_ERR("Callback is NULL");
1468                 return -EINVAL;
1469         }
1470
1471         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1472                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1473                 return -ENODEV;
1474         }
1475
1476         dev = &rte_crypto_devices[dev_id];
1477         if (qp_id >= dev->data->nb_queue_pairs) {
1478                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1479                 return -ENODEV;
1480         }
1481
1482         rte_spinlock_lock(&rte_cryptodev_callback_lock);
1483         if (dev->enq_cbs == NULL) {
1484                 CDEV_LOG_ERR("Callback not initialized");
1485                 goto cb_err;
1486         }
1487
1488         list = &dev->deq_cbs[qp_id];
1489         if (list == NULL) {
1490                 CDEV_LOG_ERR("Callback list is NULL");
1491                 goto cb_err;
1492         }
1493
1494         if (list->qsbr == NULL) {
1495                 CDEV_LOG_ERR("Rcu qsbr is NULL");
1496                 goto cb_err;
1497         }
1498
1499         prev_cb = &list->next;
1500         for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1501                 curr_cb = *prev_cb;
1502                 if (curr_cb == cb) {
1503                         /* Remove the user cb from the callback list. */
1504                         __atomic_store_n(prev_cb, curr_cb->next,
1505                                 __ATOMIC_RELAXED);
1506                         ret = 0;
1507                         break;
1508                 }
1509         }
1510
1511         if (!ret) {
1512                 /* Call sync with invalid thread id as this is part of
1513                  * control plane API
1514                  */
1515                 rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1516                 rte_free(cb);
1517         }
1518
1519 cb_err:
1520         rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1521         return ret;
1522 }
1523
1524 int
1525 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1526 {
1527         struct rte_cryptodev *dev;
1528
1529         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1530                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1531                 return -ENODEV;
1532         }
1533
1534         if (stats == NULL) {
1535                 CDEV_LOG_ERR("Invalid stats ptr");
1536                 return -EINVAL;
1537         }
1538
1539         dev = &rte_crypto_devices[dev_id];
1540         memset(stats, 0, sizeof(*stats));
1541
1542         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1543         (*dev->dev_ops->stats_get)(dev, stats);
1544         return 0;
1545 }
1546
1547 void
1548 rte_cryptodev_stats_reset(uint8_t dev_id)
1549 {
1550         struct rte_cryptodev *dev;
1551
1552         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1553                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1554                 return;
1555         }
1556
1557         dev = &rte_crypto_devices[dev_id];
1558
1559         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1560         (*dev->dev_ops->stats_reset)(dev);
1561 }
1562
1563 void
1564 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1565 {
1566         struct rte_cryptodev *dev;
1567
1568         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1569                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1570                 return;
1571         }
1572
1573         dev = &rte_crypto_devices[dev_id];
1574
1575         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1576
1577         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1578         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1579
1580         dev_info->driver_name = dev->device->driver->name;
1581         dev_info->device = dev->device;
1582 }
1583
1584 int
1585 rte_cryptodev_callback_register(uint8_t dev_id,
1586                         enum rte_cryptodev_event_type event,
1587                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1588 {
1589         struct rte_cryptodev *dev;
1590         struct rte_cryptodev_callback *user_cb;
1591
1592         if (!cb_fn)
1593                 return -EINVAL;
1594
1595         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1596                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1597                 return -EINVAL;
1598         }
1599
1600         dev = &rte_crypto_devices[dev_id];
1601         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1602
1603         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1604                 if (user_cb->cb_fn == cb_fn &&
1605                         user_cb->cb_arg == cb_arg &&
1606                         user_cb->event == event) {
1607                         break;
1608                 }
1609         }
1610
1611         /* create a new callback. */
1612         if (user_cb == NULL) {
1613                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1614                                 sizeof(struct rte_cryptodev_callback), 0);
1615                 if (user_cb != NULL) {
1616                         user_cb->cb_fn = cb_fn;
1617                         user_cb->cb_arg = cb_arg;
1618                         user_cb->event = event;
1619                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1620                 }
1621         }
1622
1623         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1624         return (user_cb == NULL) ? -ENOMEM : 0;
1625 }
1626
/*
 * Unregister event callbacks matching (cb_fn, event, cb_arg).
 *
 * An entry that was registered with cb_arg == (void *)-1 matches any
 * cb_arg passed here. Entries currently executing inside
 * rte_cryptodev_pmd_callback_process() (active != 0) are left on the
 * list and the function returns -EAGAIN so the caller can retry.
 * Returns 0 when all matching entries were removed.
 */
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	/* next is saved up front because cb may be freed in the body. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
1672
/*
 * Invoke every callback registered for @event on @dev.
 *
 * The cb_lock is dropped while each user callback runs, so a callback
 * may itself (un)register callbacks; the entry is copied to a local
 * before the lock is released because the list node may change.
 * cb_lst->active marks the entry as executing so
 * rte_cryptodev_callback_unregister() will not free it underneath us
 * (it returns -EAGAIN instead).
 */
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
1694
/*
 * Bind a symmetric session to a specific device's driver.
 *
 * The per-driver private-data slot (indexed by dev->driver_id) is
 * configured by the PMD only on first use; later calls for the same
 * driver just increment the slot's reference count. @mp supplies the
 * driver's private session data and its elements must be at least
 * the driver's private session size. Returns 0 on success or a
 * negative errno-style value.
 */
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	/* Mempool elements must fit this driver's private session data. */
	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	/* The session must have been created with a slot for this driver. */
	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	/* Configure the PMD only on first use of this driver slot. */
	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}
1741
/*
 * Bind an asymmetric session to a device: ask the device's driver to
 * allocate and populate its private session data from mempool @mp,
 * based on the transform chain @xforms.
 *
 * Private data is stored per *driver id*, so configuring a session on a
 * second device backed by the same driver is a no-op.
 *
 * Returns 0 on success, -EINVAL on invalid arguments, -ENOTSUP when the
 * driver has no configure hook, or the driver's negative error code.
 */
int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	/* Private-data slots are indexed by driver id, not device id. */
	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	/* Configure only once per driver; later calls just trace. */
	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
	return 0;
}
1782
/*
 * Create a mempool suitable for symmetric session headers.
 *
 * The element size is grown to at least header size + @user_data_size;
 * a larger caller-supplied @elt_size is kept as-is.  The pool's private
 * area records the current driver count and the user data size so that
 * rte_cryptodev_sym_is_valid_session_pool() can later reject pools
 * created under a different driver registration state.
 *
 * Returns the new mempool, or NULL on failure (rte_errno is set by
 * rte_mempool_create()).
 */
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	/* Never allocate elements smaller than header + user data. */
	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	/* Snapshot driver count / user data size for later validation. */
	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
1824
1825 static unsigned int
1826 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1827 {
1828         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1829                         sess->user_data_sz;
1830 }
1831
1832 static uint8_t
1833 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1834 {
1835         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1836
1837         if (!mp)
1838                 return 0;
1839
1840         pool_priv = rte_mempool_get_priv(mp);
1841
1842         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1843                         pool_priv->nb_drivers != nb_drivers ||
1844                         mp->elt_size <
1845                                 rte_cryptodev_sym_get_header_session_size()
1846                                 + pool_priv->user_data_sz)
1847                 return 0;
1848
1849         return 1;
1850 }
1851
/*
 * Allocate a symmetric session header from @mp (which must have been
 * created via rte_cryptodev_sym_session_pool_create()), and zero its
 * per-driver data slots and user data area.  The session still needs
 * rte_cryptodev_sym_session_init() per device before use.
 *
 * Returns the new session, or NULL on invalid pool / pool exhaustion.
 */
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Copy pool-wide layout parameters into the session header. */
	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
1884
1885 struct rte_cryptodev_asym_session *
1886 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1887 {
1888         struct rte_cryptodev_asym_session *sess;
1889         unsigned int session_size =
1890                         rte_cryptodev_asym_get_header_session_size();
1891
1892         if (!mp) {
1893                 CDEV_LOG_ERR("invalid mempool\n");
1894                 return NULL;
1895         }
1896
1897         /* Verify if provided mempool can hold elements big enough. */
1898         if (mp->elt_size < session_size) {
1899                 CDEV_LOG_ERR(
1900                         "mempool elements too small to hold session objects");
1901                 return NULL;
1902         }
1903
1904         /* Allocate a session structure from the session pool */
1905         if (rte_mempool_get(mp, (void **)&sess)) {
1906                 CDEV_LOG_ERR("couldn't get object from session mempool");
1907                 return NULL;
1908         }
1909
1910         /* Clear device session pointer.
1911          * Include the flag indicating presence of private data
1912          */
1913         memset(sess, 0, session_size);
1914
1915         rte_cryptodev_trace_asym_session_create(mp, sess);
1916         return sess;
1917 }
1918
/*
 * Drop one reference to the device's private data inside @sess.  The
 * driver's clear hook runs only when the last reference for this
 * driver id goes away.
 *
 * Returns 0 on success or when nothing was attached, -EBUSY while other
 * references remain, -EINVAL on bad arguments, -ENOTSUP when the driver
 * has no clear hook.
 */
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	/* Never configured for this driver: nothing to clear. */
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	/* Other devices of the same driver still use the private data. */
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}
1949
1950 int
1951 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1952                 struct rte_cryptodev_asym_session *sess)
1953 {
1954         struct rte_cryptodev *dev;
1955
1956         if (!rte_cryptodev_is_valid_dev(dev_id)) {
1957                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1958                 return -EINVAL;
1959         }
1960
1961         dev = rte_cryptodev_pmd_get_dev(dev_id);
1962
1963         if (dev == NULL || sess == NULL)
1964                 return -EINVAL;
1965
1966         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1967
1968         dev->dev_ops->asym_session_clear(dev, sess);
1969
1970         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1971         return 0;
1972 }
1973
1974 int
1975 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1976 {
1977         uint8_t i;
1978         struct rte_mempool *sess_mp;
1979
1980         if (sess == NULL)
1981                 return -EINVAL;
1982
1983         /* Check that all device private data has been freed */
1984         for (i = 0; i < sess->nb_drivers; i++) {
1985                 if (sess->sess_data[i].refcnt != 0)
1986                         return -EBUSY;
1987         }
1988
1989         /* Return session to mempool */
1990         sess_mp = rte_mempool_from_obj(sess);
1991         rte_mempool_put(sess_mp, sess);
1992
1993         rte_cryptodev_trace_sym_session_free(sess);
1994         return 0;
1995 }
1996
1997 int
1998 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1999 {
2000         uint8_t i;
2001         void *sess_priv;
2002         struct rte_mempool *sess_mp;
2003
2004         if (sess == NULL)
2005                 return -EINVAL;
2006
2007         /* Check that all device private data has been freed */
2008         for (i = 0; i < nb_drivers; i++) {
2009                 sess_priv = get_asym_session_private_data(sess, i);
2010                 if (sess_priv != NULL)
2011                         return -EBUSY;
2012         }
2013
2014         /* Return session to mempool */
2015         sess_mp = rte_mempool_from_obj(sess);
2016         rte_mempool_put(sess_mp, sess);
2017
2018         rte_cryptodev_trace_asym_session_free(sess);
2019         return 0;
2020 }
2021
2022 unsigned int
2023 rte_cryptodev_sym_get_header_session_size(void)
2024 {
2025         /*
2026          * Header contains pointers to the private data of all registered
2027          * drivers and all necessary information to ensure safely clear
2028          * or free al session.
2029          */
2030         struct rte_cryptodev_sym_session s = {0};
2031
2032         s.nb_drivers = nb_drivers;
2033
2034         return (unsigned int)(sizeof(s) +
2035                         rte_cryptodev_sym_session_data_size(&s));
2036 }
2037
2038 unsigned int
2039 rte_cryptodev_sym_get_existing_header_session_size(
2040                 struct rte_cryptodev_sym_session *sess)
2041 {
2042         if (!sess)
2043                 return 0;
2044         else
2045                 return (unsigned int)(sizeof(*sess) +
2046                                 rte_cryptodev_sym_session_data_size(sess));
2047 }
2048
2049 unsigned int
2050 rte_cryptodev_asym_get_header_session_size(void)
2051 {
2052         /*
2053          * Header contains pointers to the private data
2054          * of all registered drivers, and a flag which
2055          * indicates presence of private data
2056          */
2057         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
2058 }
2059
2060 unsigned int
2061 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2062 {
2063         struct rte_cryptodev *dev;
2064         unsigned int priv_sess_size;
2065
2066         if (!rte_cryptodev_is_valid_dev(dev_id))
2067                 return 0;
2068
2069         dev = rte_cryptodev_pmd_get_dev(dev_id);
2070
2071         if (*dev->dev_ops->sym_session_get_size == NULL)
2072                 return 0;
2073
2074         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2075
2076         return priv_sess_size;
2077 }
2078
2079 unsigned int
2080 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2081 {
2082         struct rte_cryptodev *dev;
2083         unsigned int header_size = sizeof(void *) * nb_drivers;
2084         unsigned int priv_sess_size;
2085
2086         if (!rte_cryptodev_is_valid_dev(dev_id))
2087                 return 0;
2088
2089         dev = rte_cryptodev_pmd_get_dev(dev_id);
2090
2091         if (*dev->dev_ops->asym_session_get_size == NULL)
2092                 return 0;
2093
2094         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2095         if (priv_sess_size < header_size)
2096                 return header_size;
2097
2098         return priv_sess_size;
2099
2100 }
2101
2102 int
2103 rte_cryptodev_sym_session_set_user_data(
2104                                         struct rte_cryptodev_sym_session *sess,
2105                                         void *data,
2106                                         uint16_t size)
2107 {
2108         if (sess == NULL)
2109                 return -EINVAL;
2110
2111         if (sess->user_data_sz < size)
2112                 return -ENOMEM;
2113
2114         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2115         return 0;
2116 }
2117
2118 void *
2119 rte_cryptodev_sym_session_get_user_data(
2120                                         struct rte_cryptodev_sym_session *sess)
2121 {
2122         if (sess == NULL || sess->user_data_sz == 0)
2123                 return NULL;
2124
2125         return (void *)(sess->sess_data + sess->nb_drivers);
2126 }
2127
2128 static inline void
2129 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2130 {
2131         uint32_t i;
2132         for (i = 0; i < vec->num; i++)
2133                 vec->status[i] = errnum;
2134 }
2135
2136 uint32_t
2137 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2138         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2139         struct rte_crypto_sym_vec *vec)
2140 {
2141         struct rte_cryptodev *dev;
2142
2143         if (!rte_cryptodev_is_valid_dev(dev_id)) {
2144                 sym_crypto_fill_status(vec, EINVAL);
2145                 return 0;
2146         }
2147
2148         dev = rte_cryptodev_pmd_get_dev(dev_id);
2149
2150         if (*dev->dev_ops->sym_cpu_process == NULL ||
2151                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2152                 sym_crypto_fill_status(vec, ENOTSUP);
2153                 return 0;
2154         }
2155
2156         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2157 }
2158
2159 int
2160 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2161 {
2162         struct rte_cryptodev *dev;
2163         int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2164         int32_t priv_size;
2165
2166         if (!rte_cryptodev_is_valid_dev(dev_id))
2167                 return -EINVAL;
2168
2169         dev = rte_cryptodev_pmd_get_dev(dev_id);
2170
2171         if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2172                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2173                 return -ENOTSUP;
2174         }
2175
2176         priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2177         if (priv_size < 0)
2178                 return -ENOTSUP;
2179
2180         return RTE_ALIGN_CEIL((size + priv_size), 8);
2181 }
2182
2183 int
2184 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2185         struct rte_crypto_raw_dp_ctx *ctx,
2186         enum rte_crypto_op_sess_type sess_type,
2187         union rte_cryptodev_session_ctx session_ctx,
2188         uint8_t is_update)
2189 {
2190         struct rte_cryptodev *dev;
2191
2192         if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2193                 return -EINVAL;
2194
2195         dev = rte_cryptodev_pmd_get_dev(dev_id);
2196         if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2197                         || dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2198                 return -ENOTSUP;
2199
2200         return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2201                         sess_type, session_ctx, is_update);
2202 }
2203
2204 uint32_t
2205 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2206         struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2207         void **user_data, int *enqueue_status)
2208 {
2209         return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2210                         ofs, user_data, enqueue_status);
2211 }
2212
2213 int
2214 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2215                 uint32_t n)
2216 {
2217         return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2218 }
2219
2220 uint32_t
2221 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2222         rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2223         uint32_t max_nb_to_dequeue,
2224         rte_cryptodev_raw_post_dequeue_t post_dequeue,
2225         void **out_user_data, uint8_t is_user_data_array,
2226         uint32_t *n_success_jobs, int *status)
2227 {
2228         return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2229                 get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2230                 out_user_data, is_user_data_array, n_success_jobs, status);
2231 }
2232
2233 int
2234 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2235                 uint32_t n)
2236 {
2237         return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2238 }
2239
2240 /** Initialise rte_crypto_op mempool element */
2241 static void
2242 rte_crypto_op_init(struct rte_mempool *mempool,
2243                 void *opaque_arg,
2244                 void *_op_data,
2245                 __rte_unused unsigned i)
2246 {
2247         struct rte_crypto_op *op = _op_data;
2248         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2249
2250         memset(_op_data, 0, mempool->elt_size);
2251
2252         __rte_crypto_op_reset(op, type);
2253
2254         op->phys_addr = rte_mem_virt2iova(_op_data);
2255         op->mempool = mempool;
2256 }
2257
2258
/*
 * Create (or look up) a crypto-op mempool named @name holding @nb_elts
 * elements, each sized for an rte_crypto_op plus the type-specific op
 * struct plus @priv_size of per-op private data.
 *
 * If a mempool with this name already exists it is reused, but only
 * when its element size, cache size, capacity and private size are
 * compatible with the request; otherwise NULL is returned.
 *
 * Returns the mempool, or NULL on bad @type, incompatible existing
 * pool, or allocation failure.
 */
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	/* Size the element for the requested op type; an UNDEFINED pool
	 * must be able to hold either kind of op.
	 */
	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				    sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		/* Reuse only a pool whose parameters can satisfy this
		 * request.
		 */
		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size <  priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	/* rte_crypto_op_init() resets each element for @type. */
	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
2326
2327 int
2328 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2329 {
2330         struct rte_cryptodev *dev = NULL;
2331         uint32_t i = 0;
2332
2333         if (name == NULL)
2334                 return -EINVAL;
2335
2336         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2337                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2338                                 "%s_%u", dev_name_prefix, i);
2339
2340                 if (ret < 0)
2341                         return ret;
2342
2343                 dev = rte_cryptodev_pmd_get_named_dev(name);
2344                 if (!dev)
2345                         return 0;
2346         }
2347
2348         return -1;
2349 }
2350
/* List of registered crypto drivers, appended to in registration order
 * by rte_cryptodev_allocate_driver().
 */
TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2355
2356 int
2357 rte_cryptodev_driver_id_get(const char *name)
2358 {
2359         struct cryptodev_driver *driver;
2360         const char *driver_name;
2361
2362         if (name == NULL) {
2363                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
2364                 return -1;
2365         }
2366
2367         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2368                 driver_name = driver->driver->name;
2369                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2370                         return driver->id;
2371         }
2372         return -1;
2373 }
2374
2375 const char *
2376 rte_cryptodev_name_get(uint8_t dev_id)
2377 {
2378         struct rte_cryptodev *dev;
2379
2380         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2381                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2382                 return NULL;
2383         }
2384
2385         dev = rte_cryptodev_pmd_get_dev(dev_id);
2386         if (dev == NULL)
2387                 return NULL;
2388
2389         return dev->data->name;
2390 }
2391
2392 const char *
2393 rte_cryptodev_driver_name_get(uint8_t driver_id)
2394 {
2395         struct cryptodev_driver *driver;
2396
2397         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2398                 if (driver->id == driver_id)
2399                         return driver->driver->name;
2400         return NULL;
2401 }
2402
2403 uint8_t
2404 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2405                 const struct rte_driver *drv)
2406 {
2407         crypto_drv->driver = drv;
2408         crypto_drv->id = nb_drivers;
2409
2410         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2411
2412         return nb_drivers++;
2413 }