cryptodev: replace bus specific struct with generic dev
[dpdk.git] lib/librte_cryptodev/rte_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43
44 static uint8_t nb_drivers;
45
46 struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
47
48 struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];
49
50 static struct rte_cryptodev_global cryptodev_globals = {
51                 .devs                   = &rte_crypto_devices[0],
52                 .data                   = { NULL },
53                 .nb_devs                = 0,
54                 .max_devs               = RTE_CRYPTO_MAX_DEVS
55 };
56
57 struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;
58
59 /* spinlock for crypto device callbacks */
60 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
61
62
63 /**
64  * The user application callback description.
65  *
66  * It contains the callback address registered by the user application,
67  * a pointer to the callback's parameter and the associated event type.
68  */
69 struct rte_cryptodev_callback {
70         TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
71         rte_cryptodev_cb_fn cb_fn;              /**< Callback address */
72         void *cb_arg;                           /**< Parameter for callback */
73         enum rte_cryptodev_event_type event;    /**< Interrupt event type */
74         uint32_t active;                        /**< Callback is executing */
75 };
76
77 /**
78  * The crypto cipher algorithm string identifiers.
79  * These can be used in the application command line.
80  */
81 const char *
82 rte_crypto_cipher_algorithm_strings[] = {
83         [RTE_CRYPTO_CIPHER_3DES_CBC]    = "3des-cbc",
84         [RTE_CRYPTO_CIPHER_3DES_ECB]    = "3des-ecb",
85         [RTE_CRYPTO_CIPHER_3DES_CTR]    = "3des-ctr",
86
87         [RTE_CRYPTO_CIPHER_AES_CBC]     = "aes-cbc",
88         [RTE_CRYPTO_CIPHER_AES_CTR]     = "aes-ctr",
89         [RTE_CRYPTO_CIPHER_AES_DOCSISBPI]       = "aes-docsisbpi",
90         [RTE_CRYPTO_CIPHER_AES_ECB]     = "aes-ecb",
91         [RTE_CRYPTO_CIPHER_AES_F8]      = "aes-f8",
92         [RTE_CRYPTO_CIPHER_AES_XTS]     = "aes-xts",
93
94         [RTE_CRYPTO_CIPHER_ARC4]        = "arc4",
95
96         [RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
97         [RTE_CRYPTO_CIPHER_DES_DOCSISBPI]       = "des-docsisbpi",
98
99         [RTE_CRYPTO_CIPHER_NULL]        = "null",
100
101         [RTE_CRYPTO_CIPHER_KASUMI_F8]   = "kasumi-f8",
102         [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
103         [RTE_CRYPTO_CIPHER_ZUC_EEA3]    = "zuc-eea3"
104 };
105
106 /**
107  * The crypto cipher operation string identifiers.
108  * These can be used in the application command line.
109  */
110 const char *
111 rte_crypto_cipher_operation_strings[] = {
112                 [RTE_CRYPTO_CIPHER_OP_ENCRYPT]  = "encrypt",
113                 [RTE_CRYPTO_CIPHER_OP_DECRYPT]  = "decrypt"
114 };
115
116 /**
117  * The crypto auth algorithm string identifiers.
118  * These can be used in the application command line.
119  */
120 const char *
121 rte_crypto_auth_algorithm_strings[] = {
122         [RTE_CRYPTO_AUTH_AES_CBC_MAC]   = "aes-cbc-mac",
123         [RTE_CRYPTO_AUTH_AES_CMAC]      = "aes-cmac",
124         [RTE_CRYPTO_AUTH_AES_GMAC]      = "aes-gmac",
125         [RTE_CRYPTO_AUTH_AES_XCBC_MAC]  = "aes-xcbc-mac",
126
127         [RTE_CRYPTO_AUTH_MD5]           = "md5",
128         [RTE_CRYPTO_AUTH_MD5_HMAC]      = "md5-hmac",
129
130         [RTE_CRYPTO_AUTH_NULL]          = "null",
131
132         [RTE_CRYPTO_AUTH_SHA1]          = "sha1",
133         [RTE_CRYPTO_AUTH_SHA1_HMAC]     = "sha1-hmac",
134
135         [RTE_CRYPTO_AUTH_SHA224]        = "sha2-224",
136         [RTE_CRYPTO_AUTH_SHA224_HMAC]   = "sha2-224-hmac",
137         [RTE_CRYPTO_AUTH_SHA256]        = "sha2-256",
138         [RTE_CRYPTO_AUTH_SHA256_HMAC]   = "sha2-256-hmac",
139         [RTE_CRYPTO_AUTH_SHA384]        = "sha2-384",
140         [RTE_CRYPTO_AUTH_SHA384_HMAC]   = "sha2-384-hmac",
141         [RTE_CRYPTO_AUTH_SHA512]        = "sha2-512",
142         [RTE_CRYPTO_AUTH_SHA512_HMAC]   = "sha2-512-hmac",
143
144         [RTE_CRYPTO_AUTH_KASUMI_F9]     = "kasumi-f9",
145         [RTE_CRYPTO_AUTH_SNOW3G_UIA2]   = "snow3g-uia2",
146         [RTE_CRYPTO_AUTH_ZUC_EIA3]      = "zuc-eia3"
147 };
148
149 /**
150  * The crypto AEAD algorithm string identifiers.
151  * These can be used in the application command line.
152  */
153 const char *
154 rte_crypto_aead_algorithm_strings[] = {
155         [RTE_CRYPTO_AEAD_AES_CCM]       = "aes-ccm",
156         [RTE_CRYPTO_AEAD_AES_GCM]       = "aes-gcm",
157 };
158
159 /**
160  * The crypto AEAD operation string identifiers.
161  * These can be used in the application command line.
162  */
163 const char *
164 rte_crypto_aead_operation_strings[] = {
165         [RTE_CRYPTO_AEAD_OP_ENCRYPT]    = "encrypt",
166         [RTE_CRYPTO_AEAD_OP_DECRYPT]    = "decrypt"
167 };
168
169 int
170 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
171                 const char *algo_string)
172 {
173         unsigned int i;
174
175         for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
176                 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
177                         *algo_enum = (enum rte_crypto_cipher_algorithm) i;
178                         return 0;
179                 }
180         }
181
182         /* Invalid string */
183         return -1;
184 }
185
186 int
187 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
188                 const char *algo_string)
189 {
190         unsigned int i;
191
192         for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
193                 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
194                         *algo_enum = (enum rte_crypto_auth_algorithm) i;
195                         return 0;
196                 }
197         }
198
199         /* Invalid string */
200         return -1;
201 }
202
203 int
204 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
205                 const char *algo_string)
206 {
207         unsigned int i;
208
209         for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
210                 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
211                         *algo_enum = (enum rte_crypto_aead_algorithm) i;
212                         return 0;
213                 }
214         }
215
216         /* Invalid string */
217         return -1;
218 }
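
/*
 * Illustrative sketch (not part of the library): how an application might
 * map a command-line string such as "aes-cbc" to its enum value with the
 * helpers above. The parse_cipher_arg() name is hypothetical.
 */
static __rte_unused int
parse_cipher_arg(const char *arg, enum rte_crypto_cipher_algorithm *algo)
{
	if (rte_cryptodev_get_cipher_algo_enum(algo, arg) < 0) {
		CDEV_LOG_ERR("Unknown cipher algorithm \"%s\"", arg);
		return -1;
	}

	return 0;
}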
219
220 /**
221  * The crypto auth operation string identifiers.
222  * These can be used in the application command line.
223  */
224 const char *
225 rte_crypto_auth_operation_strings[] = {
226                 [RTE_CRYPTO_AUTH_OP_VERIFY]     = "verify",
227                 [RTE_CRYPTO_AUTH_OP_GENERATE]   = "generate"
228 };
229
230 const struct rte_cryptodev_symmetric_capability *
231 rte_cryptodev_sym_capability_get(uint8_t dev_id,
232                 const struct rte_cryptodev_sym_capability_idx *idx)
233 {
234         const struct rte_cryptodev_capabilities *capability;
235         struct rte_cryptodev_info dev_info;
236         int i = 0;
237
238         rte_cryptodev_info_get(dev_id, &dev_info);
239
240         while ((capability = &dev_info.capabilities[i++])->op !=
241                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
242                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
243                         continue;
244
245                 if (capability->sym.xform_type != idx->type)
246                         continue;
247
248                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
249                         capability->sym.auth.algo == idx->algo.auth)
250                         return &capability->sym;
251
252                 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
253                         capability->sym.cipher.algo == idx->algo.cipher)
254                         return &capability->sym;
255
256                 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
257                                 capability->sym.aead.algo == idx->algo.aead)
258                         return &capability->sym;
259         }
260
261         return NULL;
262
263 }
264
265 static int
266 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
267 {
268         unsigned int next_size;
269
270         /* Check lower/upper bounds */
271         if (size < range->min)
272                 return -1;
273
274         if (size > range->max)
275                 return -1;
276
277         /* If range is actually only one value, size is correct */
278         if (range->increment == 0)
279                 return 0;
280
281         /* Check if value is one of the supported sizes */
282         for (next_size = range->min; next_size <= range->max;
283                         next_size += range->increment)
284                 if (size == next_size)
285                         return 0;
286
287         return -1;
288 }
289
290 int
291 rte_cryptodev_sym_capability_check_cipher(
292                 const struct rte_cryptodev_symmetric_capability *capability,
293                 uint16_t key_size, uint16_t iv_size)
294 {
295         if (param_range_check(key_size, &capability->cipher.key_size) != 0)
296                 return -1;
297
298         if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
299                 return -1;
300
301         return 0;
302 }
303
304 int
305 rte_cryptodev_sym_capability_check_auth(
306                 const struct rte_cryptodev_symmetric_capability *capability,
307                 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
308 {
309         if (param_range_check(key_size, &capability->auth.key_size) != 0)
310                 return -1;
311
312         if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
313                 return -1;
314
315         if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
316                 return -1;
317
318         return 0;
319 }
320
321 int
322 rte_cryptodev_sym_capability_check_aead(
323                 const struct rte_cryptodev_symmetric_capability *capability,
324                 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
325                 uint16_t iv_size)
326 {
327         if (param_range_check(key_size, &capability->aead.key_size) != 0)
328                 return -1;
329
330         if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
331                 return -1;
332
333         if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
334                 return -1;
335
336         if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
337                 return -1;
338
339         return 0;
340 }
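
/*
 * Illustrative sketch (not part of the library): query a device for its
 * AES-CBC capability and validate the key/IV sizes an application intends
 * to use. The 16-byte key and 16-byte IV below are example values.
 */
static __rte_unused int
check_aes_cbc_support(uint8_t dev_id)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	const struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return -1;	/* algorithm not supported at all */

	/* key/IV sizes are checked against the advertised ranges */
	return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16);
}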
341
342 const char *
343 rte_cryptodev_get_feature_name(uint64_t flag)
344 {
345         switch (flag) {
346         case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
347                 return "SYMMETRIC_CRYPTO";
348         case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
349                 return "ASYMMETRIC_CRYPTO";
350         case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
351                 return "SYM_OPERATION_CHAINING";
352         case RTE_CRYPTODEV_FF_CPU_SSE:
353                 return "CPU_SSE";
354         case RTE_CRYPTODEV_FF_CPU_AVX:
355                 return "CPU_AVX";
356         case RTE_CRYPTODEV_FF_CPU_AVX2:
357                 return "CPU_AVX2";
358         case RTE_CRYPTODEV_FF_CPU_AVX512:
359                 return "CPU_AVX512";
360         case RTE_CRYPTODEV_FF_CPU_AESNI:
361                 return "CPU_AESNI";
362         case RTE_CRYPTODEV_FF_HW_ACCELERATED:
363                 return "HW_ACCELERATED";
364         case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
365                 return "MBUF_SCATTER_GATHER";
366         case RTE_CRYPTODEV_FF_CPU_NEON:
367                 return "CPU_NEON";
368         case RTE_CRYPTODEV_FF_CPU_ARM_CE:
369                 return "CPU_ARM_CE";
370         case RTE_CRYPTODEV_FF_SECURITY:
371                 return "SECURITY_PROTOCOL";
372         default:
373                 return NULL;
374         }
375 }
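
/*
 * Illustrative sketch (not part of the library): walk the 64 feature bits
 * reported in rte_cryptodev_info.feature_flags and print the name of every
 * flag the device advertises.
 */
static __rte_unused void
dump_feature_flags(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	unsigned int i;

	rte_cryptodev_info_get(dev_id, &info);

	for (i = 0; i < 64; i++) {
		uint64_t flag = 1ULL << i;
		const char *name;

		if ((info.feature_flags & flag) == 0)
			continue;

		name = rte_cryptodev_get_feature_name(flag);
		if (name != NULL)
			printf("dev %u: %s\n", (unsigned int)dev_id, name);
	}
}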
376
377 struct rte_cryptodev *
378 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
379 {
380         return &rte_cryptodev_globals->devs[dev_id];
381 }
382
383 struct rte_cryptodev *
384 rte_cryptodev_pmd_get_named_dev(const char *name)
385 {
386         struct rte_cryptodev *dev;
387         unsigned int i;
388
389         if (name == NULL)
390                 return NULL;
391
392         for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
393                 dev = &rte_cryptodev_globals->devs[i];
394
395                 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
396                                 (strcmp(dev->data->name, name) == 0))
397                         return dev;
398         }
399
400         return NULL;
401 }
402
403 unsigned int
404 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
405 {
406         struct rte_cryptodev *dev = NULL;
407
408         if (dev_id >= rte_cryptodev_globals->nb_devs)
409                 return 0;
410
411         dev = rte_cryptodev_pmd_get_dev(dev_id);
412         if (dev->attached != RTE_CRYPTODEV_ATTACHED)
413                 return 0;
414         else
415                 return 1;
416 }
417
418
419 int
420 rte_cryptodev_get_dev_id(const char *name)
421 {
422         unsigned i;
423
424         if (name == NULL)
425                 return -1;
426
427         for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
428                 if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
429                                 == 0) &&
430                                 (rte_cryptodev_globals->devs[i].attached ==
431                                                 RTE_CRYPTODEV_ATTACHED))
432                         return i;
433
434         return -1;
435 }
436
437 uint8_t
438 rte_cryptodev_count(void)
439 {
440         return rte_cryptodev_globals->nb_devs;
441 }
442
443 uint8_t
444 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
445 {
446         uint8_t i, dev_count = 0;
447
448         for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
449                 if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
450                         rte_cryptodev_globals->devs[i].attached ==
451                                         RTE_CRYPTODEV_ATTACHED)
452                         dev_count++;
453
454         return dev_count;
455 }
456
457 uint8_t
458 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
459         uint8_t nb_devices)
460 {
461         uint8_t i, count = 0;
462         struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
463         uint8_t max_devs = rte_cryptodev_globals->max_devs;
464
465         for (i = 0; i < max_devs && count < nb_devices; i++) {
466
467                 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
468                         int cmp;
469
470                         cmp = strncmp(devs[i].device->driver->name,
471                                         driver_name,
472                                         strlen(driver_name));
473
474                         if (cmp == 0)
475                                 devices[count++] = devs[i].data->dev_id;
476                 }
477         }
478
479         return count;
480 }
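
/*
 * Illustrative sketch (not part of the library): collect the identifiers of
 * every attached device backed by a given driver, e.g. to spread queue
 * pairs across them. The array size of 8 is an arbitrary example.
 */
static __rte_unused void
list_devices_of_driver(const char *driver_name)
{
	uint8_t ids[8];
	uint8_t n, i;

	n = rte_cryptodev_devices_get(driver_name, ids, RTE_DIM(ids));
	for (i = 0; i < n; i++)
		printf("%s owns crypto device %u (%s)\n", driver_name,
				(unsigned int)ids[i],
				rte_cryptodev_name_get(ids[i]));
}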
481
482 void *
483 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
484 {
485         if (rte_crypto_devices[dev_id].feature_flags &
486                         RTE_CRYPTODEV_FF_SECURITY)
487                 return rte_crypto_devices[dev_id].security_ctx;
488
489         return NULL;
490 }
491
492 int
493 rte_cryptodev_socket_id(uint8_t dev_id)
494 {
495         struct rte_cryptodev *dev;
496
497         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
498                 return -1;
499
500         dev = rte_cryptodev_pmd_get_dev(dev_id);
501
502         return dev->data->socket_id;
503 }
504
505 static inline int
506 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
507                 int socket_id)
508 {
509         char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
510         const struct rte_memzone *mz;
511         int n;
512
513         /* generate memzone name */
514         n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
515         if (n >= (int)sizeof(mz_name))
516                 return -EINVAL;
517
518         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
519                 mz = rte_memzone_reserve(mz_name,
520                                 sizeof(struct rte_cryptodev_data),
521                                 socket_id, 0);
522         } else
523                 mz = rte_memzone_lookup(mz_name);
524
525         if (mz == NULL)
526                 return -ENOMEM;
527
528         *data = mz->addr;
529         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
530                 memset(*data, 0, sizeof(struct rte_cryptodev_data));
531
532         return 0;
533 }
534
535 static uint8_t
536 rte_cryptodev_find_free_device_index(void)
537 {
538         uint8_t dev_id;
539
540         for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
541                 if (rte_crypto_devices[dev_id].attached ==
542                                 RTE_CRYPTODEV_DETACHED)
543                         return dev_id;
544         }
545         return RTE_CRYPTO_MAX_DEVS;
546 }
547
548 struct rte_cryptodev *
549 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
550 {
551         struct rte_cryptodev *cryptodev;
552         uint8_t dev_id;
553
554         if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
555                 CDEV_LOG_ERR("Crypto device with name %s already "
556                                 "allocated!", name);
557                 return NULL;
558         }
559
560         dev_id = rte_cryptodev_find_free_device_index();
561         if (dev_id == RTE_CRYPTO_MAX_DEVS) {
562                 CDEV_LOG_ERR("Reached maximum number of crypto devices");
563                 return NULL;
564         }
565
566         cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
567
568         if (cryptodev->data == NULL) {
569                 struct rte_cryptodev_data *cryptodev_data =
570                                 cryptodev_globals.data[dev_id];
571
572                 int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
573                                 socket_id);
574
575                 if (retval < 0 || cryptodev_data == NULL)
576                         return NULL;
577
578                 cryptodev->data = cryptodev_data;
579
580                 snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
581                                 "%s", name);
582
583                 cryptodev->data->dev_id = dev_id;
584                 cryptodev->data->socket_id = socket_id;
585                 cryptodev->data->dev_started = 0;
586
587                 /* init user callbacks */
588                 TAILQ_INIT(&(cryptodev->link_intr_cbs));
589
590                 cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
591
592                 cryptodev_globals.nb_devs++;
593         }
594
595         return cryptodev;
596 }
597
598 int
599 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
600 {
601         int ret;
602
603         if (cryptodev == NULL)
604                 return -EINVAL;
605
606         /* Close device only if device operations have been set */
607         if (cryptodev->dev_ops) {
608                 ret = rte_cryptodev_close(cryptodev->data->dev_id);
609                 if (ret < 0)
610                         return ret;
611         }
612
613         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
614         cryptodev_globals.nb_devs--;
615         return 0;
616 }
617
618 uint16_t
619 rte_cryptodev_queue_pair_count(uint8_t dev_id)
620 {
621         struct rte_cryptodev *dev;
622
623         dev = &rte_crypto_devices[dev_id];
624         return dev->data->nb_queue_pairs;
625 }
626
627 static int
628 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
629                 int socket_id)
630 {
631         struct rte_cryptodev_info dev_info;
632         void **qp;
633         unsigned i;
634
635         if ((dev == NULL) || (nb_qpairs < 1)) {
636                 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
637                                                         dev, nb_qpairs);
638                 return -EINVAL;
639         }
640
641         CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
642                         nb_qpairs, dev->data->dev_id);
643
644         memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
645
646         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
647         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
648
649         if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
650                 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
651                                 nb_qpairs, dev->data->dev_id);
652                 return -EINVAL;
653         }
654
655         if (dev->data->queue_pairs == NULL) { /* first time configuration */
656                 dev->data->queue_pairs = rte_zmalloc_socket(
657                                 "cryptodev->queue_pairs",
658                                 sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
659                                 RTE_CACHE_LINE_SIZE, socket_id);
660
661                 if (dev->data->queue_pairs == NULL) {
662                         dev->data->nb_queue_pairs = 0;
663                         CDEV_LOG_ERR("failed to get memory for qp meta data, "
664                                                         "nb_queues %u",
665                                                         nb_qpairs);
666                         return -(ENOMEM);
667                 }
668         } else { /* re-configure */
669                 int ret;
670                 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
671
672                 qp = dev->data->queue_pairs;
673
674                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
675                                 -ENOTSUP);
676
677                 for (i = nb_qpairs; i < old_nb_queues; i++) {
678                         ret = (*dev->dev_ops->queue_pair_release)(dev, i);
679                         if (ret < 0)
680                                 return ret;
681                 }
682
683                 qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
684                                 RTE_CACHE_LINE_SIZE);
685                 if (qp == NULL) {
686                         CDEV_LOG_ERR("failed to realloc qp meta data,"
687                                                 " nb_queues %u", nb_qpairs);
688                         return -(ENOMEM);
689                 }
690
691                 if (nb_qpairs > old_nb_queues) {
692                         uint16_t new_qs = nb_qpairs - old_nb_queues;
693
694                         memset(qp + old_nb_queues, 0,
695                                 sizeof(qp[0]) * new_qs);
696                 }
697
698                 dev->data->queue_pairs = qp;
699
700         }
701         dev->data->nb_queue_pairs = nb_qpairs;
702         return 0;
703 }
704
705 int
706 rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
707 {
708         struct rte_cryptodev *dev;
709
710         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
711                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
712                 return -EINVAL;
713         }
714
715         dev = &rte_crypto_devices[dev_id];
716         if (queue_pair_id >= dev->data->nb_queue_pairs) {
717                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
718                 return -EINVAL;
719         }
720
721         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);
722
723         return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
724
725 }
726
727 int
728 rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
729 {
730         struct rte_cryptodev *dev;
731
732         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
733                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
734                 return -EINVAL;
735         }
736
737         dev = &rte_crypto_devices[dev_id];
738         if (queue_pair_id >= dev->data->nb_queue_pairs) {
739                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
740                 return -EINVAL;
741         }
742
743         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);
744
745         return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
746
747 }
748
749 int
750 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
751 {
752         struct rte_cryptodev *dev;
753         int diag;
754
755         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
756                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
757                 return -EINVAL;
758         }
759
760         dev = &rte_crypto_devices[dev_id];
761
762         if (dev->data->dev_started) {
763                 CDEV_LOG_ERR(
764                     "device %d must be stopped to allow configuration", dev_id);
765                 return -EBUSY;
766         }
767
768         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
769
770         /* Setup new number of queue pairs and reconfigure device. */
771         diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
772                         config->socket_id);
773         if (diag != 0) {
774                 CDEV_LOG_ERR("dev%d rte_cryptodev_queue_pairs_config = %d",
775                                 dev_id, diag);
776                 return diag;
777         }
778
779         return (*dev->dev_ops->dev_configure)(dev, config);
780 }
781
782
783 int
784 rte_cryptodev_start(uint8_t dev_id)
785 {
786         struct rte_cryptodev *dev;
787         int diag;
788
789         CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
790
791         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
792                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
793                 return -EINVAL;
794         }
795
796         dev = &rte_crypto_devices[dev_id];
797
798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
799
800         if (dev->data->dev_started != 0) {
801                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
802                         dev_id);
803                 return 0;
804         }
805
806         diag = (*dev->dev_ops->dev_start)(dev);
807         if (diag == 0)
808                 dev->data->dev_started = 1;
809         else
810                 return diag;
811
812         return 0;
813 }
814
815 void
816 rte_cryptodev_stop(uint8_t dev_id)
817 {
818         struct rte_cryptodev *dev;
819
820         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
821                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
822                 return;
823         }
824
825         dev = &rte_crypto_devices[dev_id];
826
827         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
828
829         if (dev->data->dev_started == 0) {
830                 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
831                         dev_id);
832                 return;
833         }
834
835         (*dev->dev_ops->dev_stop)(dev);
836         dev->data->dev_started = 0;
837 }
838
839 int
840 rte_cryptodev_close(uint8_t dev_id)
841 {
842         struct rte_cryptodev *dev;
843         int retval;
844
845         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
846                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
847                 return -1;
848         }
849
850         dev = &rte_crypto_devices[dev_id];
851
852         /* Device must be stopped before it can be closed */
853         if (dev->data->dev_started == 1) {
854                 CDEV_LOG_ERR("Device %u must be stopped before closing",
855                                 dev_id);
856                 return -EBUSY;
857         }
858
859         /* We can't close the device if there are outstanding sessions in use */
860         if (dev->data->session_pool != NULL) {
861                 if (!rte_mempool_full(dev->data->session_pool)) {
862                         CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
863                                         "has sessions still in use, free "
864                                         "all sessions before calling close",
865                                         (unsigned)dev_id);
866                         return -EBUSY;
867                 }
868         }
869
870         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
871         retval = (*dev->dev_ops->dev_close)(dev);
872
873         if (retval < 0)
874                 return retval;
875
876         return 0;
877 }
878
879 int
880 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
881                 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
882                 struct rte_mempool *session_pool)
883
884 {
885         struct rte_cryptodev *dev;
886
887         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
888                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
889                 return -EINVAL;
890         }
891
892         dev = &rte_crypto_devices[dev_id];
893         if (queue_pair_id >= dev->data->nb_queue_pairs) {
894                 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
895                 return -EINVAL;
896         }
897
898         if (dev->data->dev_started) {
899                 CDEV_LOG_ERR(
900                     "device %d must be stopped to allow configuration", dev_id);
901                 return -EBUSY;
902         }
903
904         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
905
906         return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
907                         socket_id, session_pool);
908 }
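
/*
 * Illustrative sketch (not part of the library): the usual bring-up order
 * for a device is configure -> queue_pair_setup for each pair -> start.
 * The descriptor count of 2048 and the caller-supplied session mempool are
 * example values; rte_cryptodev_qp_conf is assumed to carry only the ring
 * size (nb_descriptors) in this release.
 */
static __rte_unused int
setup_crypto_device(uint8_t dev_id, uint16_t nb_qps,
		struct rte_mempool *sess_mp)
{
	int socket_id = rte_cryptodev_socket_id(dev_id);
	struct rte_cryptodev_config conf = {
		.socket_id = socket_id,
		.nb_queue_pairs = nb_qps,
	};
	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
	uint16_t qp;
	int ret;

	ret = rte_cryptodev_configure(dev_id, &conf);
	if (ret < 0)
		return ret;

	for (qp = 0; qp < nb_qps; qp++) {
		ret = rte_cryptodev_queue_pair_setup(dev_id, qp, &qp_conf,
				socket_id, sess_mp);
		if (ret < 0)
			return ret;
	}

	return rte_cryptodev_start(dev_id);
}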
909
910
911 int
912 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
913 {
914         struct rte_cryptodev *dev;
915
916         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
917                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
918                 return -ENODEV;
919         }
920
921         if (stats == NULL) {
922                 CDEV_LOG_ERR("Invalid stats ptr");
923                 return -EINVAL;
924         }
925
926         dev = &rte_crypto_devices[dev_id];
927         memset(stats, 0, sizeof(*stats));
928
929         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
930         (*dev->dev_ops->stats_get)(dev, stats);
931         return 0;
932 }
933
934 void
935 rte_cryptodev_stats_reset(uint8_t dev_id)
936 {
937         struct rte_cryptodev *dev;
938
939         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
940                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
941                 return;
942         }
943
944         dev = &rte_crypto_devices[dev_id];
945
946         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
947         (*dev->dev_ops->stats_reset)(dev);
948 }
949
950
951 void
952 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
953 {
954         struct rte_cryptodev *dev;
955
956         if (dev_id >= cryptodev_globals.nb_devs) {
957                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
958                 return;
959         }
960
961         dev = &rte_crypto_devices[dev_id];
962
963         memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
964
965         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
966         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
967
968         dev_info->driver_name = dev->device->driver->name;
969         dev_info->device = dev->device;
970 }
971
972
973 int
974 rte_cryptodev_callback_register(uint8_t dev_id,
975                         enum rte_cryptodev_event_type event,
976                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
977 {
978         struct rte_cryptodev *dev;
979         struct rte_cryptodev_callback *user_cb;
980
981         if (!cb_fn)
982                 return -EINVAL;
983
984         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
985                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
986                 return -EINVAL;
987         }
988
989         dev = &rte_crypto_devices[dev_id];
990         rte_spinlock_lock(&rte_cryptodev_cb_lock);
991
992         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
993                 if (user_cb->cb_fn == cb_fn &&
994                         user_cb->cb_arg == cb_arg &&
995                         user_cb->event == event) {
996                         break;
997                 }
998         }
999
1000         /* create a new callback. */
1001         if (user_cb == NULL) {
1002                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1003                                 sizeof(struct rte_cryptodev_callback), 0);
1004                 if (user_cb != NULL) {
1005                         user_cb->cb_fn = cb_fn;
1006                         user_cb->cb_arg = cb_arg;
1007                         user_cb->event = event;
1008                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1009                 }
1010         }
1011
1012         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1013         return (user_cb == NULL) ? -ENOMEM : 0;
1014 }
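
/*
 * Illustrative sketch (not part of the library): an application-level
 * callback and its registration. crypto_event_cb() is a hypothetical name;
 * RTE_CRYPTODEV_EVENT_ERROR is assumed to be one of the defined event types.
 */
static __rte_unused void
crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
		void *cb_arg)
{
	RTE_SET_USED(cb_arg);
	CDEV_LOG_ERR("event %d reported on crypto device %u",
			(int)event, (unsigned int)dev_id);
}

static __rte_unused int
register_error_cb(uint8_t dev_id)
{
	return rte_cryptodev_callback_register(dev_id,
			RTE_CRYPTODEV_EVENT_ERROR, crypto_event_cb, NULL);
}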
1015
1016 int
1017 rte_cryptodev_callback_unregister(uint8_t dev_id,
1018                         enum rte_cryptodev_event_type event,
1019                         rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1020 {
1021         int ret;
1022         struct rte_cryptodev *dev;
1023         struct rte_cryptodev_callback *cb, *next;
1024
1025         if (!cb_fn)
1026                 return -EINVAL;
1027
1028         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1029                 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1030                 return -EINVAL;
1031         }
1032
1033         dev = &rte_crypto_devices[dev_id];
1034         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1035
1036         ret = 0;
1037         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1038
1039                 next = TAILQ_NEXT(cb, next);
1040
1041                 if (cb->cb_fn != cb_fn || cb->event != event ||
1042                                 (cb->cb_arg != (void *)-1 &&
1043                                 cb->cb_arg != cb_arg))
1044                         continue;
1045
1046                 /*
1047                  * if this callback is not executing right now,
1048                  * then remove it.
1049                  */
1050                 if (cb->active == 0) {
1051                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1052                         rte_free(cb);
1053                 } else {
1054                         ret = -EAGAIN;
1055                 }
1056         }
1057
1058         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1059         return ret;
1060 }
1061
1062 void
1063 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1064         enum rte_cryptodev_event_type event)
1065 {
1066         struct rte_cryptodev_callback *cb_lst;
1067         struct rte_cryptodev_callback dev_cb;
1068
1069         rte_spinlock_lock(&rte_cryptodev_cb_lock);
1070         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1071                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1072                         continue;
1073                 dev_cb = *cb_lst;
1074                 cb_lst->active = 1;
1075                 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1076                 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1077                                                 dev_cb.cb_arg);
1078                 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1079                 cb_lst->active = 0;
1080         }
1081         rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1082 }
1083
1084
1085 int
1086 rte_cryptodev_sym_session_init(uint8_t dev_id,
1087                 struct rte_cryptodev_sym_session *sess,
1088                 struct rte_crypto_sym_xform *xforms,
1089                 struct rte_mempool *mp)
1090 {
1091         struct rte_cryptodev *dev;
1092         uint8_t index;
1093         int ret;
1094
1095         dev = rte_cryptodev_pmd_get_dev(dev_id);
1096
1097         if (sess == NULL || xforms == NULL || dev == NULL)
1098                 return -EINVAL;
1099
1100         index = dev->driver_id;
1101
1102         if (sess->sess_private_data[index] == NULL) {
1103                 ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
1104                 if (ret < 0) {
1105                         CDEV_LOG_ERR(
1106                                 "dev_id %d failed to configure session details",
1107                                 dev_id);
1108                         return ret;
1109                 }
1110         }
1111
1112         return 0;
1113 }
1114
1115 struct rte_cryptodev_sym_session *
1116 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1117 {
1118         struct rte_cryptodev_sym_session *sess;
1119
1120         /* Allocate a session structure from the session pool */
1121         if (rte_mempool_get(mp, (void **)&sess)) {
1122                 CDEV_LOG_ERR("couldn't get object from session mempool");
1123                 return NULL;
1124         }
1125
1126         /* Clear device session pointers,
1127          * including the flag which indicates presence of private data.
1128          */
1129         memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1130
1131         return sess;
1132 }
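
/*
 * Illustrative sketch (not part of the library): allocate a session from a
 * session mempool and bind it to one device with an AES-CBC transform.
 * Key material, the 16-byte IV placed right after the crypto op and the
 * mempool are caller-supplied example values; error handling is minimal.
 */
static __rte_unused struct rte_cryptodev_sym_session *
create_cipher_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		uint8_t *key, uint16_t key_len)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.key = { .data = key, .length = key_len },
			/* IV is expected right after the crypto op */
			.iv = { .offset = sizeof(struct rte_crypto_op) +
					sizeof(struct rte_crypto_sym_op),
				.length = 16 },
		},
	};

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}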
1133
1134 int
1135 rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
1136                 struct rte_cryptodev_sym_session *sess)
1137 {
1138         struct rte_cryptodev *dev;
1139
1140         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1141                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1142                 return -EINVAL;
1143         }
1144
1145         dev = &rte_crypto_devices[dev_id];
1146
1147         /* The API is optional; no error if the driver does not support it */
1148         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);
1149
1150         void *sess_priv = get_session_private_data(sess, dev->driver_id);
1151
1152         if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
1153                 CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
1154                                 dev_id, qp_id);
1155                 return -EPERM;
1156         }
1157
1158         return 0;
1159 }
1160
1161 int
1162 rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
1163                 struct rte_cryptodev_sym_session *sess)
1164 {
1165         struct rte_cryptodev *dev;
1166
1167         if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1168                 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1169                 return -EINVAL;
1170         }
1171
1172         dev = &rte_crypto_devices[dev_id];
1173
1174         /* The API is optional; no error if the driver does not support it */
1175         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);
1176
1177         void *sess_priv = get_session_private_data(sess, dev->driver_id);
1178
1179         if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
1180                 CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
1181                                 dev_id, qp_id);
1182                 return -EPERM;
1183         }
1184
1185         return 0;
1186 }
1187
1188 int
1189 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1190                 struct rte_cryptodev_sym_session *sess)
1191 {
1192         struct rte_cryptodev *dev;
1193
1194         dev = rte_cryptodev_pmd_get_dev(dev_id);
1195
1196         if (dev == NULL || sess == NULL)
1197                 return -EINVAL;
1198
1199         dev->dev_ops->session_clear(dev, sess);
1200
1201         return 0;
1202 }
1203
1204 int
1205 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1206 {
1207         uint8_t i;
1208         void *sess_priv;
1209         struct rte_mempool *sess_mp;
1210
1211         if (sess == NULL)
1212                 return -EINVAL;
1213
1214         /* Check that all device private data has been freed */
1215         for (i = 0; i < nb_drivers; i++) {
1216                 sess_priv = get_session_private_data(sess, i);
1217                 if (sess_priv != NULL)
1218                         return -EBUSY;
1219         }
1220
1221         /* Return session to mempool */
1222         sess_mp = rte_mempool_from_obj(sess);
1223         rte_mempool_put(sess_mp, sess);
1224
1225         return 0;
1226 }
1227
1228 unsigned int
1229 rte_cryptodev_get_header_session_size(void)
1230 {
1231         return rte_cryptodev_sym_get_header_session_size();
1232 }
1233
1234 unsigned int
1235 rte_cryptodev_sym_get_header_session_size(void)
1236 {
1237         /*
1238          * Header contains pointers to the private data
1239          * of all registered drivers, and a flag which
1240          * indicates presence of private data
1241          */
1242         return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1243 }
1244
1245 unsigned int
1246 rte_cryptodev_get_private_session_size(uint8_t dev_id)
1247 {
1248         return rte_cryptodev_sym_get_private_session_size(dev_id);
1249 }
1250
1251 unsigned int
1252 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1253 {
1254         struct rte_cryptodev *dev;
1255         unsigned int header_size = sizeof(void *) * nb_drivers;
1256         unsigned int priv_sess_size;
1257
1258         if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1259                 return 0;
1260
1261         dev = rte_cryptodev_pmd_get_dev(dev_id);
1262
1263         if (*dev->dev_ops->session_get_size == NULL)
1264                 return 0;
1265
1266         priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
1267
1268         /*
1269          * If size is less than session header size,
1270          * return the latter, as this guarantees that
1271          * sessionless operations will work
1272          */
1273         if (priv_sess_size < header_size)
1274                 return header_size;
1275
1276         return priv_sess_size;
1277
1278 }
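
/*
 * Illustrative sketch (not part of the library): size a single session
 * mempool for a set of devices. Because the private size reported above is
 * never smaller than the header size, an element large enough for the
 * biggest per-device private size also fits the session header. Pool name,
 * element count and cache size are example values.
 */
static __rte_unused struct rte_mempool *
create_session_pool(const uint8_t *dev_ids, uint8_t nb_devs, int socket_id)
{
	unsigned int elt_size = 0;
	uint8_t i;

	for (i = 0; i < nb_devs; i++) {
		unsigned int sz =
			rte_cryptodev_sym_get_private_session_size(dev_ids[i]);

		if (sz > elt_size)
			elt_size = sz;
	}

	return rte_mempool_create("example_sess_mp", 2048, elt_size, 64,
			0, NULL, NULL, NULL, NULL, socket_id, 0);
}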
1279
1280 int __rte_experimental
1281 rte_cryptodev_sym_session_set_private_data(
1282                                         struct rte_cryptodev_sym_session *sess,
1283                                         void *data,
1284                                         uint16_t size)
1285 {
1286         uint16_t off_set = sizeof(void *) * nb_drivers;
1287         uint8_t *private_data_present = (uint8_t *)sess + off_set;
1288
1289         if (sess == NULL)
1290                 return -EINVAL;
1291
1292         *private_data_present = 1;
1293         off_set += sizeof(uint8_t);
1294         rte_memcpy((uint8_t *)sess + off_set, data, size);
1295         return 0;
1296 }
1297
1298 void * __rte_experimental
1299 rte_cryptodev_sym_session_get_private_data(
1300                                         struct rte_cryptodev_sym_session *sess)
1301 {
1302         uint16_t off_set = sizeof(void *) * nb_drivers;
1303         uint8_t *private_data_present = (uint8_t *)sess + off_set;
1304
1305         if (sess == NULL || !*private_data_present)
1306                 return NULL;
1307
1308         off_set += sizeof(uint8_t);
1309         return (uint8_t *)sess + off_set;
1310 }
1311
1312 /** Initialise rte_crypto_op mempool element */
1313 static void
1314 rte_crypto_op_init(struct rte_mempool *mempool,
1315                 void *opaque_arg,
1316                 void *_op_data,
1317                 __rte_unused unsigned i)
1318 {
1319         struct rte_crypto_op *op = _op_data;
1320         enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1321
1322         memset(_op_data, 0, mempool->elt_size);
1323
1324         __rte_crypto_op_reset(op, type);
1325
1326         op->phys_addr = rte_mem_virt2iova(_op_data);
1327         op->mempool = mempool;
1328 }
1329
1330
1331 struct rte_mempool *
1332 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1333                 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1334                 int socket_id)
1335 {
1336         struct rte_crypto_op_pool_private *priv;
1337
1338         unsigned elt_size = sizeof(struct rte_crypto_op) +
1339                         sizeof(struct rte_crypto_sym_op) +
1340                         priv_size;
1341
1342         /* lookup mempool in case already allocated */
1343         struct rte_mempool *mp = rte_mempool_lookup(name);
1344
1345         if (mp != NULL) {
1346                 priv = (struct rte_crypto_op_pool_private *)
1347                                 rte_mempool_get_priv(mp);
1348
1349                 if (mp->elt_size != elt_size ||
1350                                 mp->cache_size < cache_size ||
1351                                 mp->size < nb_elts ||
1352                                 priv->priv_size < priv_size) {
1353                         mp = NULL;
1354                         CDEV_LOG_ERR("Mempool %s already exists but with "
1355                                         "incompatible parameters", name);
1356                         return NULL;
1357                 }
1358                 return mp;
1359         }
1360
1361         mp = rte_mempool_create(
1362                         name,
1363                         nb_elts,
1364                         elt_size,
1365                         cache_size,
1366                         sizeof(struct rte_crypto_op_pool_private),
1367                         NULL,
1368                         NULL,
1369                         rte_crypto_op_init,
1370                         &type,
1371                         socket_id,
1372                         0);
1373
1374         if (mp == NULL) {
1375                 CDEV_LOG_ERR("Failed to create mempool %s", name);
1376                 return NULL;
1377         }
1378
1379         priv = (struct rte_crypto_op_pool_private *)
1380                         rte_mempool_get_priv(mp);
1381
1382         priv->priv_size = priv_size;
1383         priv->type = type;
1384
1385         return mp;
1386 }
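
/*
 * Illustrative sketch (not part of the library): take one operation from a
 * pool created above, describe the mbuf region to be processed, attach a
 * symmetric session and push it through queue pair 0 of a device. The zero
 * data offset and the busy-wait dequeue are example simplifications.
 */
static __rte_unused int
run_one_op(uint8_t dev_id, struct rte_mempool *op_pool,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mbuf *m, uint32_t data_len)
{
	struct rte_crypto_op *op, *deq_op;
	int ret;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -ENOMEM;

	op->sym->m_src = m;
	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = data_len;
	rte_crypto_op_attach_sym_session(op, sess);

	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
		rte_crypto_op_free(op);
		return -EIO;
	}

	while (rte_cryptodev_dequeue_burst(dev_id, 0, &deq_op, 1) == 0)
		;

	ret = (deq_op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) ? 0 : -EIO;
	rte_crypto_op_free(deq_op);

	return ret;
}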
1387
1388 int
1389 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1390 {
1391         struct rte_cryptodev *dev = NULL;
1392         uint32_t i = 0;
1393
1394         if (name == NULL)
1395                 return -EINVAL;
1396
1397         for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1398                 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1399                                 "%s_%u", dev_name_prefix, i);
1400
1401                 if (ret < 0)
1402                         return ret;
1403
1404                 dev = rte_cryptodev_pmd_get_named_dev(name);
1405                 if (!dev)
1406                         return 0;
1407         }
1408
1409         return -1;
1410 }
1411
1412 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
1413
1414 static struct cryptodev_driver_list cryptodev_driver_list =
1415         TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1416
1417 int
1418 rte_cryptodev_driver_id_get(const char *name)
1419 {
1420         struct cryptodev_driver *driver;
1421         const char *driver_name;
1422
1423         if (name == NULL) {
1424                 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
1425                 return -1;
1426         }
1427
1428         TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1429                 driver_name = driver->driver->name;
1430                 if (strncmp(driver_name, name, strlen(driver_name)) == 0)
1431                         return driver->id;
1432         }
1433         return -1;
1434 }
1435
1436 const char *
1437 rte_cryptodev_name_get(uint8_t dev_id)
1438 {
1439         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);
1440
1441         if (dev == NULL)
1442                 return NULL;
1443
1444         return dev->data->name;
1445 }
1446
1447 const char *
1448 rte_cryptodev_driver_name_get(uint8_t driver_id)
1449 {
1450         struct cryptodev_driver *driver;
1451
1452         TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1453                 if (driver->id == driver_id)
1454                         return driver->driver->name;
1455         return NULL;
1456 }
1457
1458 uint8_t
1459 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1460                 const struct rte_driver *drv)
1461 {
1462         crypto_drv->driver = drv;
1463         crypto_drv->id = nb_drivers;
1464
1465         TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1466
1467         return nb_drivers++;
1468 }
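
/*
 * Illustrative sketch (not part of the library): how a PMD obtains its
 * driver id at startup. In-tree drivers normally use the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() macro from rte_cryptodev_pmd.h rather
 * than open-coding this; the example_* names below are hypothetical.
 */
static struct rte_driver example_pmd_drv = { .name = "crypto_example_pmd" };
static struct cryptodev_driver example_crypto_drv;
static uint8_t example_driver_id;

static void __attribute__((constructor))
example_register_driver(void)
{
	example_driver_id = rte_cryptodev_allocate_driver(&example_crypto_drv,
			&example_pmd_drv);
}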