cryptodev: support multiple cipher data-units
[dpdk.git] lib/librte_cryptodev/rte_cryptodev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
                .devs                   = rte_crypto_devices,
                .data                   = { NULL },
                .nb_devs                = 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
        TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
        rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_cryptodev_event_type event;    /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

/**
 * The crypto cipher algorithm name strings.
 * They can be used on an application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
        [RTE_CRYPTO_CIPHER_3DES_CBC]    = "3des-cbc",
        [RTE_CRYPTO_CIPHER_3DES_ECB]    = "3des-ecb",
        [RTE_CRYPTO_CIPHER_3DES_CTR]    = "3des-ctr",

        [RTE_CRYPTO_CIPHER_AES_CBC]     = "aes-cbc",
        [RTE_CRYPTO_CIPHER_AES_CTR]     = "aes-ctr",
        [RTE_CRYPTO_CIPHER_AES_DOCSISBPI]       = "aes-docsisbpi",
        [RTE_CRYPTO_CIPHER_AES_ECB]     = "aes-ecb",
        [RTE_CRYPTO_CIPHER_AES_F8]      = "aes-f8",
        [RTE_CRYPTO_CIPHER_AES_XTS]     = "aes-xts",

        [RTE_CRYPTO_CIPHER_ARC4]        = "arc4",

        [RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
        [RTE_CRYPTO_CIPHER_DES_DOCSISBPI]       = "des-docsisbpi",

        [RTE_CRYPTO_CIPHER_NULL]        = "null",

        [RTE_CRYPTO_CIPHER_KASUMI_F8]   = "kasumi-f8",
        [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
        [RTE_CRYPTO_CIPHER_ZUC_EEA3]    = "zuc-eea3"
};

/**
 * The crypto cipher operation name strings.
 * They can be used on an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
                [RTE_CRYPTO_CIPHER_OP_ENCRYPT]  = "encrypt",
                [RTE_CRYPTO_CIPHER_OP_DECRYPT]  = "decrypt"
};

/**
 * The crypto auth algorithm name strings.
 * They can be used on an application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
        [RTE_CRYPTO_AUTH_AES_CBC_MAC]   = "aes-cbc-mac",
        [RTE_CRYPTO_AUTH_AES_CMAC]      = "aes-cmac",
        [RTE_CRYPTO_AUTH_AES_GMAC]      = "aes-gmac",
        [RTE_CRYPTO_AUTH_AES_XCBC_MAC]  = "aes-xcbc-mac",

        [RTE_CRYPTO_AUTH_MD5]           = "md5",
        [RTE_CRYPTO_AUTH_MD5_HMAC]      = "md5-hmac",

        [RTE_CRYPTO_AUTH_NULL]          = "null",

        [RTE_CRYPTO_AUTH_SHA1]          = "sha1",
        [RTE_CRYPTO_AUTH_SHA1_HMAC]     = "sha1-hmac",

        [RTE_CRYPTO_AUTH_SHA224]        = "sha2-224",
        [RTE_CRYPTO_AUTH_SHA224_HMAC]   = "sha2-224-hmac",
        [RTE_CRYPTO_AUTH_SHA256]        = "sha2-256",
        [RTE_CRYPTO_AUTH_SHA256_HMAC]   = "sha2-256-hmac",
        [RTE_CRYPTO_AUTH_SHA384]        = "sha2-384",
        [RTE_CRYPTO_AUTH_SHA384_HMAC]   = "sha2-384-hmac",
        [RTE_CRYPTO_AUTH_SHA512]        = "sha2-512",
        [RTE_CRYPTO_AUTH_SHA512_HMAC]   = "sha2-512-hmac",

        [RTE_CRYPTO_AUTH_KASUMI_F9]     = "kasumi-f9",
        [RTE_CRYPTO_AUTH_SNOW3G_UIA2]   = "snow3g-uia2",
        [RTE_CRYPTO_AUTH_ZUC_EIA3]      = "zuc-eia3"
};

/**
 * The crypto AEAD algorithm name strings.
 * They can be used on an application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
        [RTE_CRYPTO_AEAD_AES_CCM]       = "aes-ccm",
        [RTE_CRYPTO_AEAD_AES_GCM]       = "aes-gcm",
        [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation name strings.
 * They can be used on an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
        [RTE_CRYPTO_AEAD_OP_ENCRYPT]    = "encrypt",
        [RTE_CRYPTO_AEAD_OP_DECRYPT]    = "decrypt"
};

/**
 * Asymmetric crypto transform operation strings identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
        [RTE_CRYPTO_ASYM_XFORM_NONE]    = "none",
        [RTE_CRYPTO_ASYM_XFORM_RSA]     = "rsa",
        [RTE_CRYPTO_ASYM_XFORM_MODEX]   = "modexp",
        [RTE_CRYPTO_ASYM_XFORM_MODINV]  = "modinv",
        [RTE_CRYPTO_ASYM_XFORM_DH]      = "dh",
        [RTE_CRYPTO_ASYM_XFORM_DSA]     = "dsa",
        [RTE_CRYPTO_ASYM_XFORM_ECDSA]   = "ecdsa",
        [RTE_CRYPTO_ASYM_XFORM_ECPM]    = "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
        [RTE_CRYPTO_ASYM_OP_ENCRYPT]    = "encrypt",
        [RTE_CRYPTO_ASYM_OP_DECRYPT]    = "decrypt",
        [RTE_CRYPTO_ASYM_OP_SIGN]       = "sign",
        [RTE_CRYPTO_ASYM_OP_VERIFY]     = "verify",
        [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]       = "priv_key_generate",
        [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
        [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool's private data
 * area.
 */
struct rte_cryptodev_sym_session_pool_private_data {
        uint16_t nb_drivers;
        /**< number of elements in sess_data array */
        uint16_t user_data_sz;
        /**< session user data will be placed after sess_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
                const char *algo_string)
{
        unsigned int i;

        for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
                if (strcmp(algo_string,
                                rte_crypto_cipher_algorithm_strings[i]) == 0) {
                        *algo_enum = (enum rte_crypto_cipher_algorithm) i;
                        return 0;
                }
        }

        /* Invalid string */
        return -1;
}
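
/*
 * Example (illustrative only, not part of this file): parsing a
 * user-supplied algorithm name, e.g. from a command line. The "aes-cbc"
 * literal is just one of the strings defined in
 * rte_crypto_cipher_algorithm_strings above.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 */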

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
                const char *algo_string)
{
        unsigned int i;

        for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
                if (strcmp(algo_string,
                                rte_crypto_auth_algorithm_strings[i]) == 0) {
                        *algo_enum = (enum rte_crypto_auth_algorithm) i;
                        return 0;
                }
        }

        /* Invalid string */
        return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
                const char *algo_string)
{
        unsigned int i;

        for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
                if (strcmp(algo_string,
                                rte_crypto_aead_algorithm_strings[i]) == 0) {
                        *algo_enum = (enum rte_crypto_aead_algorithm) i;
                        return 0;
                }
        }

        /* Invalid string */
        return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
                const char *xform_string)
{
        unsigned int i;

        for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
                if (strcmp(xform_string,
                        rte_crypto_asym_xform_strings[i]) == 0) {
                        *xform_enum = (enum rte_crypto_asym_xform_type) i;
                        return 0;
                }
        }

        /* Invalid string */
        return -1;
}

/**
 * The crypto auth operation name strings.
 * They can be used on an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
                [RTE_CRYPTO_AUTH_OP_VERIFY]     = "verify",
                [RTE_CRYPTO_AUTH_OP_GENERATE]   = "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
                const struct rte_cryptodev_sym_capability_idx *idx)
{
        const struct rte_cryptodev_capabilities *capability;
        struct rte_cryptodev_info dev_info;
        int i = 0;

        rte_cryptodev_info_get(dev_id, &dev_info);

        while ((capability = &dev_info.capabilities[i++])->op !=
                        RTE_CRYPTO_OP_TYPE_UNDEFINED) {
                if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
                        continue;

                if (capability->sym.xform_type != idx->type)
                        continue;

                if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                        capability->sym.auth.algo == idx->algo.auth)
                        return &capability->sym;

                if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                        capability->sym.cipher.algo == idx->algo.cipher)
                        return &capability->sym;

                if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                                capability->sym.aead.algo == idx->algo.aead)
                        return &capability->sym;
        }

        return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
        unsigned int next_size;

        /* Check lower/upper bounds */
        if (size < range->min)
                return -1;

        if (size > range->max)
                return -1;

        /* If range is actually only one value, size is correct */
        if (range->increment == 0)
                return 0;

        /* Check if value is one of the supported sizes */
        for (next_size = range->min; next_size <= range->max;
                        next_size += range->increment)
                if (size == next_size)
                        return 0;

        return -1;
}
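
/*
 * Worked example (illustrative): for a range of {min = 16, max = 32,
 * increment = 8}, param_range_check() accepts sizes 16, 24 and 32 and
 * rejects everything else. An increment of 0 denotes a single supported
 * value (min == max).
 */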

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
                const struct rte_cryptodev_asym_capability_idx *idx)
{
        const struct rte_cryptodev_capabilities *capability;
        struct rte_cryptodev_info dev_info;
        unsigned int i = 0;

        memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
        rte_cryptodev_info_get(dev_id, &dev_info);

        while ((capability = &dev_info.capabilities[i++])->op !=
                        RTE_CRYPTO_OP_TYPE_UNDEFINED) {
                if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
                        continue;

                if (capability->asym.xform_capa.xform_type == idx->type)
                        return &capability->asym.xform_capa;
        }
        return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
                const struct rte_cryptodev_symmetric_capability *capability,
                uint16_t key_size, uint16_t iv_size)
{
        if (param_range_check(key_size, &capability->cipher.key_size) != 0)
                return -1;

        if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
                return -1;

        return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
                const struct rte_cryptodev_symmetric_capability *capability,
                uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
        if (param_range_check(key_size, &capability->auth.key_size) != 0)
                return -1;

        if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
                return -1;

        if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
                return -1;

        return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
                const struct rte_cryptodev_symmetric_capability *capability,
                uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
                uint16_t iv_size)
{
        if (param_range_check(key_size, &capability->aead.key_size) != 0)
                return -1;

        if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
                return -1;

        if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
                return -1;

        if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
                return -1;

        return 0;
}
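
/*
 * Example (illustrative only): querying a device for AES-CBC support and
 * validating key/IV sizes before building a session. dev_id is assumed to
 * be a valid, configured device.
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL || rte_cryptodev_sym_capability_check_cipher(cap,
 *			16, 16) < 0)
 *		printf("AES-CBC with 128-bit key not supported\n");
 */
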
int
rte_cryptodev_asym_xform_capability_check_optype(
        const struct rte_cryptodev_asymmetric_xform_capability *capability,
        enum rte_crypto_asym_op_type op_type)
{
        if (capability->op_types & (1 << op_type))
                return 1;

        return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
        const struct rte_cryptodev_asymmetric_xform_capability *capability,
        uint16_t modlen)
{
        /* no need to check the limits if min or max is 0 */
        if (capability->modlen.min != 0) {
                if (modlen < capability->modlen.min)
                        return -1;
        }

        if (capability->modlen.max != 0) {
                if (modlen > capability->modlen.max)
                        return -1;
        }

        /* in any case, check if the given modlen is a multiple of the
         * increment
         */
        if (capability->modlen.increment != 0) {
                if (modlen % (capability->modlen.increment))
                        return -1;
        }

        return 0;
}

/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

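/* Release all enqueue/dequeue callback lists and their RCU QSBR state. */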
static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
        struct rte_cryptodev_cb_rcu *list;
        struct rte_cryptodev_cb *cb, *next;
        uint16_t qp_id;

        if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
                return;

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                list = &dev->enq_cbs[qp_id];
                cb = list->next;
                while (cb != NULL) {
                        next = cb->next;
                        rte_free(cb);
                        cb = next;
                }

                rte_free(list->qsbr);
        }

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                list = &dev->deq_cbs[qp_id];
                cb = list->next;
                while (cb != NULL) {
                        next = cb->next;
                        rte_free(cb);
                        cb = next;
                }

                rte_free(list->qsbr);
        }

        rte_free(dev->enq_cbs);
        dev->enq_cbs = NULL;
        rte_free(dev->deq_cbs);
        dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
        struct rte_cryptodev_cb_rcu *list;
        struct rte_rcu_qsbr *qsbr;
        uint16_t qp_id;
        size_t size;

        /* Max threads is 1, as only one data-plane thread accesses a
         * queue pair at a time.
         */
        const uint32_t max_threads = 1;

        dev->enq_cbs = rte_zmalloc(NULL,
                                   sizeof(struct rte_cryptodev_cb_rcu) *
                                   dev->data->nb_queue_pairs, 0);
        if (dev->enq_cbs == NULL) {
                CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
                return -ENOMEM;
        }

        dev->deq_cbs = rte_zmalloc(NULL,
                                   sizeof(struct rte_cryptodev_cb_rcu) *
                                   dev->data->nb_queue_pairs, 0);
        if (dev->deq_cbs == NULL) {
                CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
                rte_free(dev->enq_cbs);
                return -ENOMEM;
        }

        /* Create RCU QSBR variable */
        size = rte_rcu_qsbr_get_memsize(max_threads);

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                list = &dev->enq_cbs[qp_id];
                qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
                if (qsbr == NULL) {
                        CDEV_LOG_ERR("Failed to allocate memory for RCU on "
                                "queue_pair_id=%d", qp_id);
                        goto cb_init_err;
                }

                if (rte_rcu_qsbr_init(qsbr, max_threads)) {
                        CDEV_LOG_ERR("Failed to initialize RCU on "
                                "queue_pair_id=%d", qp_id);
                        goto cb_init_err;
                }

                list->qsbr = qsbr;
        }

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                list = &dev->deq_cbs[qp_id];
                qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
                if (qsbr == NULL) {
                        CDEV_LOG_ERR("Failed to allocate memory for RCU on "
                                "queue_pair_id=%d", qp_id);
                        goto cb_init_err;
                }

                if (rte_rcu_qsbr_init(qsbr, max_threads)) {
                        CDEV_LOG_ERR("Failed to initialize RCU on "
                                "queue_pair_id=%d", qp_id);
                        goto cb_init_err;
                }

                list->qsbr = qsbr;
        }

        return 0;

cb_init_err:
        cryptodev_cb_cleanup(dev);
        return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
        switch (flag) {
        case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
                return "SYMMETRIC_CRYPTO";
        case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
                return "ASYMMETRIC_CRYPTO";
        case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
                return "SYM_OPERATION_CHAINING";
        case RTE_CRYPTODEV_FF_CPU_SSE:
                return "CPU_SSE";
        case RTE_CRYPTODEV_FF_CPU_AVX:
                return "CPU_AVX";
        case RTE_CRYPTODEV_FF_CPU_AVX2:
                return "CPU_AVX2";
        case RTE_CRYPTODEV_FF_CPU_AVX512:
                return "CPU_AVX512";
        case RTE_CRYPTODEV_FF_CPU_AESNI:
                return "CPU_AESNI";
        case RTE_CRYPTODEV_FF_HW_ACCELERATED:
                return "HW_ACCELERATED";
        case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
                return "IN_PLACE_SGL";
        case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
                return "OOP_SGL_IN_SGL_OUT";
        case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
                return "OOP_SGL_IN_LB_OUT";
        case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
                return "OOP_LB_IN_SGL_OUT";
        case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
                return "OOP_LB_IN_LB_OUT";
        case RTE_CRYPTODEV_FF_CPU_NEON:
                return "CPU_NEON";
        case RTE_CRYPTODEV_FF_CPU_ARM_CE:
                return "CPU_ARM_CE";
        case RTE_CRYPTODEV_FF_SECURITY:
                return "SECURITY_PROTOCOL";
        case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
                return "RSA_PRIV_OP_KEY_EXP";
        case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
                return "RSA_PRIV_OP_KEY_QT";
        case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
                return "DIGEST_ENCRYPTED";
        case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
                return "SYM_CPU_CRYPTO";
        case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
                return "ASYM_SESSIONLESS";
        case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
                return "SYM_SESSIONLESS";
        case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
                return "NON_BYTE_ALIGNED_DATA";
        case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
                return "CIPHER_MULTIPLE_DATA_UNITS";
        default:
                return NULL;
        }
}
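
/*
 * Example (illustrative only): printing the feature names of a device.
 * dev_info is assumed to have been filled in by rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(
 *				dev_info.feature_flags & flag);
 *		if (name != NULL)
 *			printf("%s\n", name);
 *	}
 */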

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
        return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
        struct rte_cryptodev *dev;
        unsigned int i;

        if (name == NULL)
                return NULL;

        for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
                dev = &cryptodev_globals.devs[i];

                if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
                                (strcmp(dev->data->name, name) == 0))
                        return dev;
        }

        return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
        if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
                        rte_crypto_devices[dev_id].data == NULL)
                return 0;

        return 1;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
        struct rte_cryptodev *dev = NULL;

        if (!rte_cryptodev_is_valid_device_data(dev_id))
                return 0;

        dev = rte_cryptodev_pmd_get_dev(dev_id);
        if (dev->attached != RTE_CRYPTODEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
        unsigned int i;

        if (name == NULL)
                return -1;

        for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
                if (!rte_cryptodev_is_valid_device_data(i))
                        continue;
                if ((strcmp(cryptodev_globals.devs[i].data->name, name)
                                == 0) &&
                                (cryptodev_globals.devs[i].attached ==
                                                RTE_CRYPTODEV_ATTACHED))
                        return i;
        }

        return -1;
}

uint8_t
rte_cryptodev_count(void)
{
        return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
        uint8_t i, dev_count = 0;

        for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
                if (cryptodev_globals.devs[i].driver_id == driver_id &&
                        cryptodev_globals.devs[i].attached ==
                                        RTE_CRYPTODEV_ATTACHED)
                        dev_count++;

        return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
        uint8_t nb_devices)
{
        uint8_t i, count = 0;
        struct rte_cryptodev *devs = cryptodev_globals.devs;

        for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
                if (!rte_cryptodev_is_valid_device_data(i))
                        continue;

                if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
                        int cmp;

                        cmp = strncmp(devs[i].device->driver->name,
                                        driver_name,
                                        strlen(driver_name) + 1);

                        if (cmp == 0)
                                devices[count++] = devs[i].data->dev_id;
                }
        }

        return count;
}
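
/*
 * Example (illustrative only): enumerating up to 8 devices bound to a given
 * driver. The "crypto_aesni_mb" driver name is an assumption for the
 * example.
 *
 *	uint8_t ids[8];
 *	uint8_t n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, 8);
 *
 *	while (n-- > 0)
 *		printf("dev %u\n", ids[n]);
 */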

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
        if (dev_id < RTE_CRYPTO_MAX_DEVS &&
                        (rte_crypto_devices[dev_id].feature_flags &
                        RTE_CRYPTODEV_FF_SECURITY))
                return rte_crypto_devices[dev_id].security_ctx;

        return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
        struct rte_cryptodev *dev;

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
                return -1;

        dev = rte_cryptodev_pmd_get_dev(dev_id);

        return dev->data->socket_id;
}

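/* Reserve (primary process) or look up (secondary process) the shared
 * memzone that holds a device's rte_cryptodev_data.
 */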
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
                int socket_id)
{
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        int n;

        /* generate memzone name */
        n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
        if (n >= (int)sizeof(mz_name))
                return -EINVAL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(mz_name,
                                sizeof(struct rte_cryptodev_data),
                                socket_id, 0);
                CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
                                mz_name, mz);
        } else {
                mz = rte_memzone_lookup(mz_name);
                CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
                                mz_name, mz);
        }

        if (mz == NULL)
                return -ENOMEM;

        *data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(*data, 0, sizeof(struct rte_cryptodev_data));

        return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        int n;

        /* generate memzone name */
        n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
        if (n >= (int)sizeof(mz_name))
                return -EINVAL;

        mz = rte_memzone_lookup(mz_name);
        if (mz == NULL)
                return -ENOMEM;

        RTE_ASSERT(*data == mz->addr);
        *data = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
                                mz_name, mz);
                return rte_memzone_free(mz);
        } else {
                CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
                                mz_name, mz);
        }

        return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
        uint8_t dev_id;

        for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
                if (rte_crypto_devices[dev_id].attached ==
                                RTE_CRYPTODEV_DETACHED)
                        return dev_id;
        }
        return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
        struct rte_cryptodev *cryptodev;
        uint8_t dev_id;

        if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
                CDEV_LOG_ERR("Crypto device with name %s already "
                                "allocated!", name);
                return NULL;
        }

        dev_id = rte_cryptodev_find_free_device_index();
        if (dev_id == RTE_CRYPTO_MAX_DEVS) {
                CDEV_LOG_ERR("Reached maximum number of crypto devices");
                return NULL;
        }

        cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

        if (cryptodev->data == NULL) {
                struct rte_cryptodev_data **cryptodev_data =
                                &cryptodev_globals.data[dev_id];

                int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
                                socket_id);

                if (retval < 0 || *cryptodev_data == NULL)
                        return NULL;

                cryptodev->data = *cryptodev_data;

                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        strlcpy(cryptodev->data->name, name,
                                RTE_CRYPTODEV_NAME_MAX_LEN);

                        cryptodev->data->dev_id = dev_id;
                        cryptodev->data->socket_id = socket_id;
                        cryptodev->data->dev_started = 0;
                        CDEV_LOG_DEBUG("PRIMARY:init data");
                }

                CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
                                cryptodev->data->name,
                                cryptodev->data->dev_id,
                                cryptodev->data->socket_id,
                                cryptodev->data->dev_started);

                /* init user callbacks */
                TAILQ_INIT(&(cryptodev->link_intr_cbs));

                cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

                cryptodev_globals.nb_devs++;
        }

        return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
        int ret;
        uint8_t dev_id;

        if (cryptodev == NULL)
                return -EINVAL;

        dev_id = cryptodev->data->dev_id;

        /* Close device only if device operations have been set */
        if (cryptodev->dev_ops) {
                ret = rte_cryptodev_close(dev_id);
                if (ret < 0)
                        return ret;
        }

        ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
        if (ret < 0)
                return ret;

        cryptodev->attached = RTE_CRYPTODEV_DETACHED;
        cryptodev_globals.nb_devs--;
        return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
        struct rte_cryptodev *dev;

        if (!rte_cryptodev_is_valid_device_data(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
                return 0;
        }

        dev = &rte_crypto_devices[dev_id];
        return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
                int socket_id)
{
        struct rte_cryptodev_info dev_info;
        void **qp;
        unsigned int i;

        if ((dev == NULL) || (nb_qpairs < 1)) {
                CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
                                                        dev, nb_qpairs);
                return -EINVAL;
        }

        CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
                        nb_qpairs, dev->data->dev_id);

        memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
                CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
                                nb_qpairs, dev->data->dev_id);
                return -EINVAL;
        }

        if (dev->data->queue_pairs == NULL) { /* first time configuration */
                dev->data->queue_pairs = rte_zmalloc_socket(
                                "cryptodev->queue_pairs",
                                sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
                                RTE_CACHE_LINE_SIZE, socket_id);

                if (dev->data->queue_pairs == NULL) {
                        dev->data->nb_queue_pairs = 0;
                        CDEV_LOG_ERR("failed to get memory for qp meta data, "
                                                        "nb_queues %u",
                                                        nb_qpairs);
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                int ret;
                uint16_t old_nb_queues = dev->data->nb_queue_pairs;

                qp = dev->data->queue_pairs;

                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
                                -ENOTSUP);

                for (i = nb_qpairs; i < old_nb_queues; i++) {
                        ret = (*dev->dev_ops->queue_pair_release)(dev, i);
                        if (ret < 0)
                                return ret;
                }

                qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
                                RTE_CACHE_LINE_SIZE);
                if (qp == NULL) {
                        CDEV_LOG_ERR("failed to realloc qp meta data,"
                                                " nb_queues %u", nb_qpairs);
                        return -(ENOMEM);
                }

                if (nb_qpairs > old_nb_queues) {
                        uint16_t new_qs = nb_qpairs - old_nb_queues;

                        memset(qp + old_nb_queues, 0,
                                sizeof(qp[0]) * new_qs);
                }

                dev->data->queue_pairs = qp;
        }
        dev->data->nb_queue_pairs = nb_qpairs;
        return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
        struct rte_cryptodev *dev;
        int diag;

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
                return -EINVAL;
        }

        dev = &rte_crypto_devices[dev_id];

        if (dev->data->dev_started) {
                CDEV_LOG_ERR(
                    "device %d must be stopped to allow configuration", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        rte_spinlock_lock(&rte_cryptodev_callback_lock);
        cryptodev_cb_cleanup(dev);
        rte_spinlock_unlock(&rte_cryptodev_callback_lock);

        /* Setup new number of queue pairs and reconfigure device. */
        diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
                        config->socket_id);
        if (diag != 0) {
                CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
                                dev_id, diag);
                return diag;
        }

        rte_spinlock_lock(&rte_cryptodev_callback_lock);
        diag = cryptodev_cb_init(dev);
        rte_spinlock_unlock(&rte_cryptodev_callback_lock);
        if (diag) {
                CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
                return diag;
        }

        rte_cryptodev_trace_configure(dev_id, config);
        return (*dev->dev_ops->dev_configure)(dev, config);
}
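
/*
 * Example (illustrative only): a typical device bring-up sequence. The
 * queue-pair and descriptor counts, and the session_pool/session_priv_pool
 * mempools, are assumptions for the example.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		return -1;
 *	if (rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */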

int
rte_cryptodev_start(uint8_t dev_id)
{
        struct rte_cryptodev *dev;
        int diag;

        CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
                return -EINVAL;
        }

        dev = &rte_crypto_devices[dev_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
                        dev_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        rte_cryptodev_trace_start(dev_id, diag);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
        struct rte_cryptodev *dev;

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
                return;
        }

        dev = &rte_crypto_devices[dev_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
                        dev_id);
                return;
        }

        (*dev->dev_ops->dev_stop)(dev);
        rte_cryptodev_trace_stop(dev_id);
        dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
        struct rte_cryptodev *dev;
        int retval;

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
                return -1;
        }

        dev = &rte_crypto_devices[dev_id];

        /* Device must be stopped before it can be closed */
        if (dev->data->dev_started == 1) {
                CDEV_LOG_ERR("Device %u must be stopped before closing",
                                dev_id);
                return -EBUSY;
        }

        /* We can't close the device if there are outstanding sessions in use */
        if (dev->data->session_pool != NULL) {
                if (!rte_mempool_full(dev->data->session_pool)) {
                        CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
                                        "has sessions still in use, free "
                                        "all sessions before calling close",
                                        (unsigned)dev_id);
                        return -EBUSY;
                }
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
        retval = (*dev->dev_ops->dev_close)(dev);
        rte_cryptodev_trace_close(dev_id, retval);

        if (retval < 0)
                return retval;

        return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
        struct rte_cryptodev *dev;

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
                return -EINVAL;
        }

        dev = &rte_crypto_devices[dev_id];
        if (queue_pair_id >= dev->data->nb_queue_pairs) {
                CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
                return -EINVAL;
        }
        void **qps = dev->data->queue_pairs;

        if (qps[queue_pair_id]) {
                CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
                        queue_pair_id, dev_id);
                return 1;
        }

        CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
                queue_pair_id, dev_id);

        return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
                const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
        struct rte_cryptodev *dev;

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
                return -EINVAL;
        }

        dev = &rte_crypto_devices[dev_id];
        if (queue_pair_id >= dev->data->nb_queue_pairs) {
                CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
                return -EINVAL;
        }

        if (!qp_conf) {
                CDEV_LOG_ERR("qp_conf cannot be NULL");
                return -EINVAL;
        }

        if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
                        (!qp_conf->mp_session && qp_conf->mp_session_private)) {
                CDEV_LOG_ERR("Invalid mempools");
                return -EINVAL;
        }

        if (qp_conf->mp_session) {
                struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
                uint32_t obj_size = qp_conf->mp_session->elt_size;
                uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
                struct rte_cryptodev_sym_session s = {0};

                pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
                if (!pool_priv || qp_conf->mp_session->private_data_size <
                                sizeof(*pool_priv)) {
                        CDEV_LOG_ERR("Invalid mempool");
                        return -EINVAL;
                }

                s.nb_drivers = pool_priv->nb_drivers;
                s.user_data_sz = pool_priv->user_data_sz;

                if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
                        obj_size) || (s.nb_drivers <= dev->driver_id) ||
                        rte_cryptodev_sym_get_private_session_size(dev_id) >
                                obj_priv_size) {
                        CDEV_LOG_ERR("Invalid mempool");
                        return -EINVAL;
                }
        }

        if (dev->data->dev_started) {
                CDEV_LOG_ERR(
                    "device %d must be stopped to allow configuration", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

        rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
        return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
                        socket_id);
}
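
/*
 * Example (illustrative only): session mempools that satisfy the checks
 * above. The pool names and sizes are assumptions for the example.
 *
 *	struct rte_mempool *sess_mp = rte_cryptodev_sym_session_pool_create(
 *			"sess_mp", 1024, 0, 64, 0, rte_socket_id());
 *	struct rte_mempool *sess_priv_mp = rte_mempool_create(
 *			"sess_priv_mp", 1024,
 *			rte_cryptodev_sym_get_private_session_size(dev_id),
 *			64, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 */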

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
                               uint16_t qp_id,
                               rte_cryptodev_callback_fn cb_fn,
                               void *cb_arg)
{
        struct rte_cryptodev *dev;
        struct rte_cryptodev_cb_rcu *list;
        struct rte_cryptodev_cb *cb, *tail;

        if (!cb_fn) {
                CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
                rte_errno = EINVAL;
                return NULL;
        }

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
                rte_errno = ENODEV;
                return NULL;
        }

        dev = &rte_crypto_devices[dev_id];
        if (qp_id >= dev->data->nb_queue_pairs) {
                CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
                rte_errno = ENODEV;
                return NULL;
        }

        cb = rte_zmalloc(NULL, sizeof(*cb), 0);
        if (cb == NULL) {
                CDEV_LOG_ERR("Failed to allocate memory for callback on "
                             "dev=%d, queue_pair_id=%d", dev_id, qp_id);
                rte_errno = ENOMEM;
                return NULL;
        }

        rte_spinlock_lock(&rte_cryptodev_callback_lock);

        cb->fn = cb_fn;
        cb->arg = cb_arg;

        /* Add the callbacks in fifo order. */
        list = &dev->enq_cbs[qp_id];
        tail = list->next;

        if (tail) {
                while (tail->next)
                        tail = tail->next;
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
        } else {
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
        }

        rte_spinlock_unlock(&rte_cryptodev_callback_lock);

        return cb;
}
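
/*
 * Example (illustrative only): a minimal enqueue callback that counts
 * operations. The callback runs in the data path, so it must not block;
 * count_enq and counter are hypothetical names for the example.
 *
 *	static uint16_t
 *	count_enq(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *arg)
 *	{
 *		uint64_t *counter = arg;
 *
 *		*counter += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb = rte_cryptodev_add_enq_callback(dev_id,
 *			0, count_enq, &counter);
 */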

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
                                  uint16_t qp_id,
                                  struct rte_cryptodev_cb *cb)
{
        struct rte_cryptodev *dev;
        struct rte_cryptodev_cb **prev_cb, *curr_cb;
        struct rte_cryptodev_cb_rcu *list;
        int ret;

        ret = -EINVAL;

        if (!cb) {
                CDEV_LOG_ERR("Callback is NULL");
                return -EINVAL;
        }

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
                return -ENODEV;
        }

        dev = &rte_crypto_devices[dev_id];
        if (qp_id >= dev->data->nb_queue_pairs) {
                CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
                return -ENODEV;
        }

        rte_spinlock_lock(&rte_cryptodev_callback_lock);
        if (dev->enq_cbs == NULL) {
                CDEV_LOG_ERR("Callback not initialized");
                goto cb_err;
        }

        list = &dev->enq_cbs[qp_id];
        if (list == NULL) {
                CDEV_LOG_ERR("Callback list is NULL");
                goto cb_err;
        }

        if (list->qsbr == NULL) {
                CDEV_LOG_ERR("Rcu qsbr is NULL");
                goto cb_err;
        }

        prev_cb = &list->next;
        for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
                curr_cb = *prev_cb;
                if (curr_cb == cb) {
                        /* Remove the user cb from the callback list. */
                        __atomic_store_n(prev_cb, curr_cb->next,
                                __ATOMIC_RELAXED);
                        ret = 0;
                        break;
                }
        }

        if (!ret) {
                /* Call sync with invalid thread id as this is part of
                 * control plane API
                 */
                rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
                rte_free(cb);
        }

cb_err:
        rte_spinlock_unlock(&rte_cryptodev_callback_lock);
        return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
                               uint16_t qp_id,
                               rte_cryptodev_callback_fn cb_fn,
                               void *cb_arg)
{
        struct rte_cryptodev *dev;
        struct rte_cryptodev_cb_rcu *list;
        struct rte_cryptodev_cb *cb, *tail;

        if (!cb_fn) {
                CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
                rte_errno = EINVAL;
                return NULL;
        }

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
                rte_errno = ENODEV;
                return NULL;
        }

        dev = &rte_crypto_devices[dev_id];
        if (qp_id >= dev->data->nb_queue_pairs) {
                CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
                rte_errno = ENODEV;
                return NULL;
        }

        cb = rte_zmalloc(NULL, sizeof(*cb), 0);
        if (cb == NULL) {
                CDEV_LOG_ERR("Failed to allocate memory for callback on "
                             "dev=%d, queue_pair_id=%d", dev_id, qp_id);
                rte_errno = ENOMEM;
                return NULL;
        }

        rte_spinlock_lock(&rte_cryptodev_callback_lock);

        cb->fn = cb_fn;
        cb->arg = cb_arg;

        /* Add the callbacks in fifo order. */
        list = &dev->deq_cbs[qp_id];
        tail = list->next;

        if (tail) {
                while (tail->next)
                        tail = tail->next;
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
        } else {
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
        }

        rte_spinlock_unlock(&rte_cryptodev_callback_lock);

        return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
                                  uint16_t qp_id,
                                  struct rte_cryptodev_cb *cb)
{
        struct rte_cryptodev *dev;
        struct rte_cryptodev_cb **prev_cb, *curr_cb;
        struct rte_cryptodev_cb_rcu *list;
        int ret;

        ret = -EINVAL;

        if (!cb) {
                CDEV_LOG_ERR("Callback is NULL");
                return -EINVAL;
        }

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
                return -ENODEV;
        }

        dev = &rte_crypto_devices[dev_id];
        if (qp_id >= dev->data->nb_queue_pairs) {
                CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
                return -ENODEV;
        }

        rte_spinlock_lock(&rte_cryptodev_callback_lock);
        if (dev->deq_cbs == NULL) {
                CDEV_LOG_ERR("Callback not initialized");
                goto cb_err;
        }

        list = &dev->deq_cbs[qp_id];
        if (list == NULL) {
                CDEV_LOG_ERR("Callback list is NULL");
                goto cb_err;
        }

        if (list->qsbr == NULL) {
                CDEV_LOG_ERR("Rcu qsbr is NULL");
                goto cb_err;
        }

        prev_cb = &list->next;
        for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
                curr_cb = *prev_cb;
                if (curr_cb == cb) {
                        /* Remove the user cb from the callback list. */
                        __atomic_store_n(prev_cb, curr_cb->next,
                                __ATOMIC_RELAXED);
                        ret = 0;
                        break;
                }
        }

        if (!ret) {
                /* Call sync with invalid thread id as this is part of
                 * control plane API
                 */
                rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
                rte_free(cb);
        }

cb_err:
        rte_spinlock_unlock(&rte_cryptodev_callback_lock);
        return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
        struct rte_cryptodev *dev;

        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
                return -ENODEV;
        }

        if (stats == NULL) {
                CDEV_LOG_ERR("Invalid stats ptr");
                return -EINVAL;
        }

        dev = &rte_crypto_devices[dev_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        (*dev->dev_ops->stats_get)(dev, stats);
        return 0;
}
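
/*
 * Example (illustrative only): reading and resetting device statistics.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 "\n",
 *				stats.enqueued_count, stats.dequeued_count);
 *	rte_cryptodev_stats_reset(dev_id);
 */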
1560
1561 void
1562 rte_cryptodev_stats_reset(uint8_t dev_id)
1563 {
1564         struct rte_cryptodev *dev;
1565
1566         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1567                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1568                 return;
1569         }
1570
1571         dev = &rte_crypto_devices[dev_id];
1572
1573         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1574         (*dev->dev_ops->stats_reset)(dev);
1575 }
1576
1577 void
1578 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1579 {
1580         struct rte_cryptodev *dev;
1581
1582         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1583                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1584                 return;
1585         }
1586
1587         dev = &rte_crypto_devices[dev_id];
1588
1589         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1590
1591         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1592         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1593
1594         dev_info->driver_name = dev->device->driver->name;
1595         dev_info->device = dev->device;
1596 }
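
/*
 * A typical pattern built on rte_cryptodev_info_get(): probe the device
 * before configuring it. A sketch, with 'nb_qps' standing in for the
 * application's requirement:
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (info.max_nb_queue_pairs < nb_qps ||
 *	    !(info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
 *		return -1;
 */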
1597
1598 int
1599 rte_cryptodev_callback_register(uint8_t dev_id,
1600                         enum rte_cryptodev_event_type event,
1601                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1602 {
1603         struct rte_cryptodev *dev;
1604         struct rte_cryptodev_callback *user_cb;
1605
1606         if (!cb_fn)
1607                 return -EINVAL;
1608
1609         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1610                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1611                 return -EINVAL;
1612         }
1613
1614         dev = &rte_crypto_devices[dev_id];
1615         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1616
1617         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1618                 if (user_cb->cb_fn == cb_fn &&
1619                         user_cb->cb_arg == cb_arg &&
1620                         user_cb->event == event) {
1621                         break;
1622                 }
1623         }
1624
1625         /* create a new callback. */
1626         if (user_cb == NULL) {
1627                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1628                                 sizeof(struct rte_cryptodev_callback), 0);
1629                 if (user_cb != NULL) {
1630                         user_cb->cb_fn = cb_fn;
1631                         user_cb->cb_arg = cb_arg;
1632                         user_cb->event = event;
1633                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1634                 }
1635         }
1636
1637         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1638         return (user_cb == NULL) ? -ENOMEM : 0;
1639 }
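
/*
 * Sketch of registering an event handler (the handler name is a
 * placeholder). The handler is invoked from
 * rte_cryptodev_pmd_callback_process() below:
 *
 *	static void
 *	event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		...
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			event_cb, NULL);
 */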
1640
1641 int
1642 rte_cryptodev_callback_unregister(uint8_t dev_id,
1643                         enum rte_cryptodev_event_type event,
1644                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1645 {
1646         int ret;
1647         struct rte_cryptodev *dev;
1648         struct rte_cryptodev_callback *cb, *next;
1649
1650         if (!cb_fn)
1651                 return -EINVAL;
1652
1653         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1654                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1655                 return -EINVAL;
1656         }
1657
1658         dev = &rte_crypto_devices[dev_id];
1659         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1660
1661         ret = 0;
1662         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1663
1664                 next = TAILQ_NEXT(cb, next);
1665
1666                 if (cb->cb_fn != cb_fn || cb->event != event ||
1667                                 (cb->cb_arg != (void *)-1 &&
1668                                 cb->cb_arg != cb_arg))
1669                         continue;
1670
1671                 /*
1672                  * if this callback is not executing right now,
1673                  * then remove it.
1674                  */
1675                 if (cb->active == 0) {
1676                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1677                         rte_free(cb);
1678                 } else {
1679                         ret = -EAGAIN;
1680                 }
1681         }
1682
1683         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1684         return ret;
1685 }
1686
1687 void
1688 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1689         enum rte_cryptodev_event_type event)
1690 {
1691         struct rte_cryptodev_callback *cb_lst;
1692         struct rte_cryptodev_callback dev_cb;
1693
1694         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1695         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1696                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1697                         continue;
1698                 dev_cb = *cb_lst;
1699                 cb_lst->active = 1;
1700                 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1701                 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1702                                                 dev_cb.cb_arg);
1703                 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1704                 cb_lst->active = 0;
1705         }
1706         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1707 }
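
/*
 * The lock/flag interplay above deserves a note: the spinlock is dropped
 * around the user callback so that the callback itself may safely call
 * register/unregister, while the 'active' flag is what stops
 * rte_cryptodev_callback_unregister() from freeing a callback that is
 * mid-execution (it returns -EAGAIN instead, and the caller may retry).
 */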
1708
1709 int
1710 rte_cryptodev_sym_session_init(uint8_t dev_id,
1711                 struct rte_cryptodev_sym_session *sess,
1712                 struct rte_crypto_sym_xform *xforms,
1713                 struct rte_mempool *mp)
1714 {
1715         struct rte_cryptodev *dev;
1716         uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1717                         dev_id);
1718         uint8_t index;
1719         int ret;
1720
1721         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1722                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1723                 return -EINVAL;
1724         }
1725
1726         dev = rte_cryptodev_pmd_get_dev(dev_id);
1727
1728         if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
1729                 return -EINVAL;
1730
1731         if (mp->elt_size < sess_priv_sz)
1732                 return -EINVAL;
1733
1734         index = dev->driver_id;
1735         if (index >= sess->nb_drivers)
1736                 return -EINVAL;
1737
1738         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1739
1740         if (sess->sess_data[index].refcnt == 0) {
1741                 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1742                                                         sess, mp);
1743                 if (ret < 0) {
1744                         CDEV_LOG_ERR(
1745                                 "dev_id %d failed to configure session details",
1746                                 dev_id);
1747                         return ret;
1748                 }
1749         }
1750
1751         rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1752         sess->sess_data[index].refcnt++;
1753         return 0;
1754 }
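
/*
 * A minimal session setup sketch, assuming 'sess_mp' (header pool) and
 * 'priv_mp' (private-data pool) were sized with the helpers further below,
 * and 'xform' describes the crypto transform:
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp) < 0)
 *		... handle error ...
 */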
1755
1756 int
1757 rte_cryptodev_asym_session_init(uint8_t dev_id,
1758                 struct rte_cryptodev_asym_session *sess,
1759                 struct rte_crypto_asym_xform *xforms,
1760                 struct rte_mempool *mp)
1761 {
1762         struct rte_cryptodev *dev;
1763         uint8_t index;
1764         int ret;
1765
1766         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1767                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1768                 return -EINVAL;
1769         }
1770
1771         dev = rte_cryptodev_pmd_get_dev(dev_id);
1772
1773         if (sess == NULL || xforms == NULL || dev == NULL)
1774                 return -EINVAL;
1775
1776         index = dev->driver_id;
1777
1778         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1779                                 -ENOTSUP);
1780
1781         if (sess->sess_private_data[index] == NULL) {
1782                 ret = dev->dev_ops->asym_session_configure(dev,
1783                                                         xforms,
1784                                                         sess, mp);
1785                 if (ret < 0) {
1786                         CDEV_LOG_ERR(
1787                                 "dev_id %d failed to configure session details",
1788                                 dev_id);
1789                         return ret;
1790                 }
1791         }
1792
1793         rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1794         return 0;
1795 }
1796
1797 struct rte_mempool *
1798 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1799         uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1800         int socket_id)
1801 {
1802         struct rte_mempool *mp;
1803         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1804         uint32_t obj_sz;
1805
1806         obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1807         if (obj_sz > elt_size)
1808                 CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
1809                                 obj_sz);
1810         else
1811                 obj_sz = elt_size;
1812
1813         mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1814                         (uint32_t)(sizeof(*pool_priv)),
1815                         NULL, NULL, NULL, NULL,
1816                         socket_id, 0);
1817         if (mp == NULL) {
1818                 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
1819                         __func__, name, rte_errno);
1820                 return NULL;
1821         }
1822
1823         pool_priv = rte_mempool_get_priv(mp);
1824         if (!pool_priv) {
1825                 CDEV_LOG_ERR("%s(name=%s) failed to get private data",
1826                         __func__, name);
1827                 rte_mempool_free(mp);
1828                 return NULL;
1829         }
1830
1831         pool_priv->nb_drivers = nb_drivers;
1832         pool_priv->user_data_sz = user_data_size;
1833
1834         rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1835                 elt_size, cache_size, user_data_size, mp);
1836         return mp;
1837 }
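
/*
 * Because the helper above expands elt_size to fit the session header for
 * every registered driver, passing 0 is the simplest correct choice; a
 * sketch with 16 bytes of per-session user data (parameters illustrative):
 *
 *	struct rte_mempool *sess_mp;
 *
 *	sess_mp = rte_cryptodev_sym_session_pool_create("sess_hdr_pool",
 *			1024, 0, 32, 16, rte_socket_id());
 */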
1838
1839 static unsigned int
1840 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1841 {
1842         return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1843                         sess->user_data_sz;
1844 }
1845
1846 static uint8_t
1847 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1848 {
1849         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1850
1851         if (!mp)
1852                 return 0;
1853
1854         pool_priv = rte_mempool_get_priv(mp);
1855
1856         if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1857                         pool_priv->nb_drivers != nb_drivers ||
1858                         mp->elt_size <
1859                                 rte_cryptodev_sym_get_header_session_size()
1860                                 + pool_priv->user_data_sz)
1861                 return 0;
1862
1863         return 1;
1864 }
1865
1866 struct rte_cryptodev_sym_session *
1867 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1868 {
1869         struct rte_cryptodev_sym_session *sess;
1870         struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1871
1872         if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1873                 CDEV_LOG_ERR("Invalid mempool");
1874                 return NULL;
1875         }
1876
1877         pool_priv = rte_mempool_get_priv(mp);
1878
1879         /* Allocate a session structure from the session pool */
1880         if (rte_mempool_get(mp, (void **)&sess)) {
1881                 CDEV_LOG_ERR("couldn't get object from session mempool");
1882                 return NULL;
1883         }
1884
1885         sess->nb_drivers = pool_priv->nb_drivers;
1886         sess->user_data_sz = pool_priv->user_data_sz;
1887         sess->opaque_data = 0;
1888
1889         /* Clear the per-driver session data, including the
1890          * flag that indicates presence of user data.
1891          */
1892         memset(sess->sess_data, 0,
1893                         rte_cryptodev_sym_session_data_size(sess));
1894
1895         rte_cryptodev_trace_sym_session_create(mp, sess);
1896         return sess;
1897 }
1898
1899 struct rte_cryptodev_asym_session *
1900 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1901 {
1902         struct rte_cryptodev_asym_session *sess;
1903         unsigned int session_size =
1904                         rte_cryptodev_asym_get_header_session_size();
1905
1906         if (!mp) {
1907                 CDEV_LOG_ERR("Invalid mempool");
1908                 return NULL;
1909         }
1910
1911         /* Verify that the mempool elements are big enough to hold a session. */
1912         if (mp->elt_size < session_size) {
1913                 CDEV_LOG_ERR(
1914                         "mempool elements too small to hold session objects");
1915                 return NULL;
1916         }
1917
1918         /* Allocate a session structure from the session pool */
1919         if (rte_mempool_get(mp, (void **)&sess)) {
1920                 CDEV_LOG_ERR("couldn't get object from session mempool");
1921                 return NULL;
1922         }
1923
1924         /* Clear the device session data, including the
1925          * flag that indicates presence of private data.
1926          */
1927         memset(sess, 0, session_size);
1928
1929         rte_cryptodev_trace_asym_session_create(mp, sess);
1930         return sess;
1931 }
1932
1933 int
1934 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1935                 struct rte_cryptodev_sym_session *sess)
1936 {
1937         struct rte_cryptodev *dev;
1938         uint8_t driver_id;
1939
1940         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1941                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1942                 return -EINVAL;
1943         }
1944
1945         dev = rte_cryptodev_pmd_get_dev(dev_id);
1946
1947         if (dev == NULL || sess == NULL)
1948                 return -EINVAL;
1949
1950         driver_id = dev->driver_id;
1951         if (sess->sess_data[driver_id].refcnt == 0)
1952                 return 0;
1953         if (--sess->sess_data[driver_id].refcnt != 0)
1954                 return -EBUSY;
1955
1956         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1957
1958         dev->dev_ops->sym_session_clear(dev, sess);
1959
1960         rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1961         return 0;
1962 }
1963
1964 int
1965 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1966                 struct rte_cryptodev_asym_session *sess)
1967 {
1968         struct rte_cryptodev *dev;
1969
1970         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1971                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1972                 return -EINVAL;
1973         }
1974
1975         dev = rte_cryptodev_pmd_get_dev(dev_id);
1976
1977         if (dev == NULL || sess == NULL)
1978                 return -EINVAL;
1979
1980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1981
1982         dev->dev_ops->asym_session_clear(dev, sess);
1983
1984         rte_cryptodev_trace_asym_session_clear(dev_id, sess);
1985         return 0;
1986 }
1987
1988 int
1989 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1990 {
1991         uint8_t i;
1992         struct rte_mempool *sess_mp;
1993
1994         if (sess == NULL)
1995                 return -EINVAL;
1996
1997         /* Check that all device private data has been freed */
1998         for (i = 0; i < sess->nb_drivers; i++) {
1999                 if (sess->sess_data[i].refcnt != 0)
2000                         return -EBUSY;
2001         }
2002
2003         /* Return session to mempool */
2004         sess_mp = rte_mempool_from_obj(sess);
2005         rte_mempool_put(sess_mp, sess);
2006
2007         rte_cryptodev_trace_sym_session_free(sess);
2008         return 0;
2009 }
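
/*
 * Teardown mirrors setup: clear the per-device private data on every device
 * the session was initialized on, then free the header. The free above
 * deliberately fails with -EBUSY while any refcnt is still held:
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */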
2010
2011 int
2012 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
2013 {
2014         uint8_t i;
2015         void *sess_priv;
2016         struct rte_mempool *sess_mp;
2017
2018         if (sess == NULL)
2019                 return -EINVAL;
2020
2021         /* Check that all device private data has been freed */
2022         for (i = 0; i < nb_drivers; i++) {
2023                 sess_priv = get_asym_session_private_data(sess, i);
2024                 if (sess_priv != NULL)
2025                         return -EBUSY;
2026         }
2027
2028         /* Return session to mempool */
2029         sess_mp = rte_mempool_from_obj(sess);
2030         rte_mempool_put(sess_mp, sess);
2031
2032         rte_cryptodev_trace_asym_session_free(sess);
2033         return 0;
2034 }
2035
2036 unsigned int
2037 rte_cryptodev_sym_get_header_session_size(void)
2038 {
2039         /*
2040          * The header contains pointers to the private data of all
2041          * registered drivers and all the information needed to safely
2042          * clear or free the session.
2043          */
2044         struct rte_cryptodev_sym_session s = {0};
2045
2046         s.nb_drivers = nb_drivers;
2047
2048         return (unsigned int)(sizeof(s) +
2049                         rte_cryptodev_sym_session_data_size(&s));
2050 }
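
/*
 * Spelled out, the size computed above is
 *
 *	sizeof(struct rte_cryptodev_sym_session)
 *		+ nb_drivers * sizeof(s.sess_data[0])
 *
 * (user_data_sz is zero for the stack-local template), which is why
 * rte_cryptodev_sym_is_valid_session_pool() re-checks nb_drivers: a pool
 * created before additional drivers registered would be undersized.
 */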
2051
2052 unsigned int
2053 rte_cryptodev_sym_get_existing_header_session_size(
2054                 struct rte_cryptodev_sym_session *sess)
2055 {
2056         if (!sess)
2057                 return 0;
2058         else
2059                 return (unsigned int)(sizeof(*sess) +
2060                                 rte_cryptodev_sym_session_data_size(sess));
2061 }
2062
2063 unsigned int
2064 rte_cryptodev_asym_get_header_session_size(void)
2065 {
2066         /*
2067          * Header contains pointers to the private data
2068          * of all registered drivers, and a flag which
2069          * indicates presence of private data
2070          */
2071         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
2072 }
2073
2074 unsigned int
2075 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2076 {
2077         struct rte_cryptodev *dev;
2078         unsigned int priv_sess_size;
2079
2080         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
2081                 return 0;
2082
2083         dev = rte_cryptodev_pmd_get_dev(dev_id);
2084
2085         if (*dev->dev_ops->sym_session_get_size == NULL)
2086                 return 0;
2087
2088         priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2089
2090         return priv_sess_size;
2091 }
2092
2093 unsigned int
2094 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2095 {
2096         struct rte_cryptodev *dev;
2097         unsigned int header_size = sizeof(void *) * nb_drivers;
2098         unsigned int priv_sess_size;
2099
2100         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
2101                 return 0;
2102
2103         dev = rte_cryptodev_pmd_get_dev(dev_id);
2104
2105         if (*dev->dev_ops->asym_session_get_size == NULL)
2106                 return 0;
2107
2108         priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2109         if (priv_sess_size < header_size)
2110                 return header_size;
2111
2112         return priv_sess_size;
2113
2114 }
2115
2116 int
2117 rte_cryptodev_sym_session_set_user_data(
2118                                         struct rte_cryptodev_sym_session *sess,
2119                                         void *data,
2120                                         uint16_t size)
2121 {
2122         if (sess == NULL)
2123                 return -EINVAL;
2124
2125         if (sess->user_data_sz < size)
2126                 return -ENOMEM;
2127
2128         rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2129         return 0;
2130 }
2131
2132 void *
2133 rte_cryptodev_sym_session_get_user_data(
2134                                         struct rte_cryptodev_sym_session *sess)
2135 {
2136         if (sess == NULL || sess->user_data_sz == 0)
2137                 return NULL;
2138
2139         return (void *)(sess->sess_data + sess->nb_drivers);
2140 }
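
/*
 * The user-data area lives immediately after the per-driver session data,
 * hence the 'sess_data + nb_drivers' pointer arithmetic above. A sketch,
 * assuming the pool was created with user_data_size >= sizeof(tag):
 *
 *	uint64_t tag = 42, *p;
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &tag, sizeof(tag));
 *	p = rte_cryptodev_sym_session_get_user_data(sess);
 */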
2141
2142 static inline void
2143 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2144 {
2145         uint32_t i;
2146         for (i = 0; i < vec->num; i++)
2147                 vec->status[i] = errnum;
2148 }
2149
2150 uint32_t
2151 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2152         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2153         struct rte_crypto_sym_vec *vec)
2154 {
2155         struct rte_cryptodev *dev;
2156
2157         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
2158                 sym_crypto_fill_status(vec, EINVAL);
2159                 return 0;
2160         }
2161
2162         dev = rte_cryptodev_pmd_get_dev(dev_id);
2163
2164         if (*dev->dev_ops->sym_cpu_process == NULL ||
2165                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2166                 sym_crypto_fill_status(vec, ENOTSUP);
2167                 return 0;
2168         }
2169
2170         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2171 }
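
/*
 * Unlike the enqueue/dequeue path, this call is synchronous: the operations
 * described by 'vec' are processed on the calling lcore before it returns,
 * and per-element results are written to vec->status. Note the error
 * convention above: failures are reported through the status array (EINVAL,
 * ENOTSUP) together with a return value of 0 processed operations.
 */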
2172
2173 int
2174 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2175 {
2176         struct rte_cryptodev *dev;
2177         int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2178         int32_t priv_size;
2179
2180         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
2181                 return -EINVAL;
2182
2183         dev = rte_cryptodev_pmd_get_dev(dev_id);
2184
2185         if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2186                 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2187                 return -ENOTSUP;
2188         }
2189
2190         priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2191         if (priv_size < 0)
2192                 return -ENOTSUP;
2193
2194         return RTE_ALIGN_CEIL((size + priv_size), 8);
2195 }
2196
2197 int
2198 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2199         struct rte_crypto_raw_dp_ctx *ctx,
2200         enum rte_crypto_op_sess_type sess_type,
2201         union rte_cryptodev_session_ctx session_ctx,
2202         uint8_t is_update)
2203 {
2204         struct rte_cryptodev *dev;
2205
2206         if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2207                 return -EINVAL;
2208
2209         dev = rte_cryptodev_pmd_get_dev(dev_id);
2210         if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2211                         || dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2212                 return -ENOTSUP;
2213
2214         return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2215                         sess_type, session_ctx, is_update);
2216 }
2217
2218 uint32_t
2219 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2220         struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2221         void **user_data, int *enqueue_status)
2222 {
2223         return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2224                         ofs, user_data, enqueue_status);
2225 }
2226
2227 int
2228 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2229                 uint32_t n)
2230 {
2231         return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2232 }
2233
2234 uint32_t
2235 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2236         rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2237         rte_cryptodev_raw_post_dequeue_t post_dequeue,
2238         void **out_user_data, uint8_t is_user_data_array,
2239         uint32_t *n_success_jobs, int *status)
2240 {
2241         return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2242                 get_dequeue_count, post_dequeue, out_user_data,
2243                 is_user_data_array, n_success_jobs, status);
2244 }
2245
2246 int
2247 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2248                 uint32_t n)
2249 {
2250         return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2251 }
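
/*
 * The raw data-path calls above form a pipeline. A control-flow sketch,
 * with error handling omitted and 'session_ctx', 'vec', 'ofs' and 'udata'
 * assumed to be prepared by the caller:
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, session_ctx, 0);
 *	n = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, udata, &st);
 *	rte_cryptodev_raw_enqueue_done(ctx, n);	// notify driver of n ops
 *	... poll for completions ...
 *	rte_cryptodev_raw_dequeue_done(ctx, n_deq);
 */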
2252
2253 /** Initialise rte_crypto_op mempool element */
2254 static void
2255 rte_crypto_op_init(struct rte_mempool *mempool,
2256                 void *opaque_arg,
2257                 void *_op_data,
2258                 __rte_unused unsigned i)
2259 {
2260         struct rte_crypto_op *op = _op_data;
2261         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2262
2263         memset(_op_data, 0, mempool->elt_size);
2264
2265         __rte_crypto_op_reset(op, type);
2266
2267         op->phys_addr = rte_mem_virt2iova(_op_data);
2268         op->mempool = mempool;
2269 }
2270
2271
2272 struct rte_mempool *
2273 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2274                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2275                 int socket_id)
2276 {
2277         struct rte_crypto_op_pool_private *priv;
2278
2279         unsigned elt_size = sizeof(struct rte_crypto_op) +
2280                         priv_size;
2281
2282         if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2283                 elt_size += sizeof(struct rte_crypto_sym_op);
2284         } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2285                 elt_size += sizeof(struct rte_crypto_asym_op);
2286         } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2287                 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2288                                     sizeof(struct rte_crypto_asym_op));
2289         } else {
2290                 CDEV_LOG_ERR("Invalid op_type");
2291                 return NULL;
2292         }
2293
2294         /* lookup mempool in case already allocated */
2295         struct rte_mempool *mp = rte_mempool_lookup(name);
2296
2297         if (mp != NULL) {
2298                 priv = (struct rte_crypto_op_pool_private *)
2299                                 rte_mempool_get_priv(mp);
2300
2301                 if (mp->elt_size != elt_size ||
2302                                 mp->cache_size < cache_size ||
2303                                 mp->size < nb_elts ||
2304                                 priv->priv_size < priv_size) {
2305                         mp = NULL;
2306                         CDEV_LOG_ERR("Mempool %s already exists but with "
2307                                         "incompatible parameters", name);
2308                         return NULL;
2309                 }
2310                 return mp;
2311         }
2312
2313         mp = rte_mempool_create(
2314                         name,
2315                         nb_elts,
2316                         elt_size,
2317                         cache_size,
2318                         sizeof(struct rte_crypto_op_pool_private),
2319                         NULL,
2320                         NULL,
2321                         rte_crypto_op_init,
2322                         &type,
2323                         socket_id,
2324                         0);
2325
2326         if (mp == NULL) {
2327                 CDEV_LOG_ERR("Failed to create mempool %s", name);
2328                 return NULL;
2329         }
2330
2331         priv = (struct rte_crypto_op_pool_private *)
2332                         rte_mempool_get_priv(mp);
2333
2334         priv->priv_size = priv_size;
2335         priv->type = type;
2336
2337         return mp;
2338 }
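
/*
 * An allocation sketch on top of the helper above (pool parameters are
 * illustrative):
 *
 *	struct rte_mempool *op_mp;
 *	struct rte_crypto_op *op;
 *
 *	op_mp = rte_crypto_op_pool_create("crypto_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	op = rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *	...
 *	rte_crypto_op_free(op);
 */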
2339
2340 int
2341 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2342 {
2343         struct rte_cryptodev *dev = NULL;
2344         uint32_t i = 0;
2345
2346         if (name == NULL)
2347                 return -EINVAL;
2348
2349         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2350                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2351                                 "%s_%u", dev_name_prefix, i);
2352
2353                 if (ret < 0)
2354                         return ret;
2355
2356                 dev = rte_cryptodev_pmd_get_named_dev(name);
2357                 if (!dev)
2358                         return 0;
2359         }
2360
2361         return -1;
2362 }
2363
2364 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2365
2366 static struct cryptodev_driver_list cryptodev_driver_list =
2367         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2368
2369 int
2370 rte_cryptodev_driver_id_get(const char *name)
2371 {
2372         struct cryptodev_driver *driver;
2373         const char *driver_name;
2374
2375         if (name == NULL) {
2376                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2377                 return -1;
2378         }
2379
2380         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2381                 driver_name = driver->driver->name;
2382                 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2383                         return driver->id;
2384         }
2385         return -1;
2386 }
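
/*
 * Driver ids are handed out in registration order (see
 * rte_cryptodev_allocate_driver() at the end of this file), so they are
 * stable only within a single run. Lookup sketch, using an in-tree driver
 * name:
 *
 *	int id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *	if (id >= 0)
 *		printf("%s => %d\n", rte_cryptodev_driver_name_get(id), id);
 */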
2387
2388 const char *
2389 rte_cryptodev_name_get(uint8_t dev_id)
2390 {
2391         struct rte_cryptodev *dev;
2392
2393         if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2394                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2395                 return NULL;
2396         }
2397
2398         dev = rte_cryptodev_pmd_get_dev(dev_id);
2399         if (dev == NULL)
2400                 return NULL;
2401
2402         return dev->data->name;
2403 }
2404
2405 const char *
2406 rte_cryptodev_driver_name_get(uint8_t driver_id)
2407 {
2408         struct cryptodev_driver *driver;
2409
2410         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2411                 if (driver->id == driver_id)
2412                         return driver->driver->name;
2413         return NULL;
2414 }
2415
2416 uint8_t
2417 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2418                 const struct rte_driver *drv)
2419 {
2420         crypto_drv->driver = drv;
2421         crypto_drv->id = nb_drivers;
2422
2423         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2424
2425         return nb_drivers++;
2426 }