/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
/* Number of registered crypto drivers; also the next driver id to assign */
static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the parameter passed to the callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};
/**
 * String identifiers for the crypto cipher algorithms.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};
/**
 * String identifiers for the crypto cipher operations.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};
/**
 * String identifiers for the crypto auth algorithms.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};
/**
 * String identifiers for the crypto AEAD algorithms.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};
/**
 * String identifiers for the crypto AEAD operations.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};
/**
 * String identifiers for the asymmetric crypto transforms.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
};
/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE]	= "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE]	= "sharedsecret_compute",
};
/**
 * The private data stored in the session mempool's private data area.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in the sess_data array */
	uint16_t user_data_sz;
	/**< session user data is placed after sess_data */
};
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
int __rte_experimental
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
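
/*
 * Illustrative sketch, not part of the library: how an application might
 * map a command-line string such as "aes-cbc" to its enum value with the
 * lookup helpers above. The literal "aes-cbc" matches the table earlier
 * in this file.
 */
static int __rte_unused
example_parse_cipher_arg(void)
{
	enum rte_crypto_cipher_algorithm algo;

	/* Returns 0 on a successful match, -1 otherwise */
	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
		return -1;

	/* algo now holds RTE_CRYPTO_CIPHER_AES_CBC */
	return 0;
}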
/**
 * String identifiers for the crypto auth operations.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}

	return NULL;
}
int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
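
/*
 * Illustrative sketch, not part of the library: querying a device for an
 * AES-CBC capability and validating a 16-byte key and IV against the
 * advertised ranges. The device id 0 is an assumption for the example.
 */
static int __rte_unused
example_check_aes_cbc_support(void)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx cap_idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
	};

	cap = rte_cryptodev_sym_capability_get(0, &cap_idx);
	if (cap == NULL)
		return -1; /* algorithm not supported at all */

	/* Returns 0 when both sizes fall within the advertised ranges */
	return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16);
}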
int __rte_experimental
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	/* no need to check the limits when min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	default:
		return NULL;
	}
}
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < cryptodev_globals.max_devs; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}
unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (dev_id >= cryptodev_globals.nb_devs)
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}
int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < cryptodev_globals.nb_devs; i++)
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}
uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < cryptodev_globals.max_devs; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;
	uint8_t max_devs = cryptodev_globals.max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
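
/*
 * Illustrative sketch, not part of the library: collecting the ids of all
 * attached devices bound to one driver and printing their names. The
 * driver name "crypto_aesni_mb" is an assumption; any registered PMD
 * name can be used.
 */
static void __rte_unused
example_list_driver_devices(void)
{
	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
	uint8_t n, i;

	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
			RTE_CRYPTO_MAX_DEVS);
	for (i = 0; i < n; i++)
		printf("dev %u: %s\n", ids[i],
				rte_cryptodev_name_get(ids[i]));
}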
void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}
int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}
static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(cryptodev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}
uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}
int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}
int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
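
/*
 * Illustrative sketch, not part of the library: the minimal bring-up
 * sequence implied by the checks above - configure while stopped, set up
 * each queue pair, then start. The descriptor count and single queue pair
 * are assumptions; the two session mempools are assumed to have been
 * created beforehand (see rte_cryptodev_sym_session_pool_create()).
 */
static int __rte_unused
example_device_bringup(uint8_t dev_id, struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool)
{
	struct rte_cryptodev_config conf = {
		.socket_id = rte_cryptodev_socket_id(dev_id),
		.nb_queue_pairs = 1,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = sess_pool,
		.mp_session_private = sess_priv_pool,
	};

	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		return -1;

	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
			conf.socket_id) < 0)
		return -1;

	return rte_cryptodev_start(dev_id);
}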
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}
void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
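
/*
 * Illustrative sketch, not part of the library: registering a handler
 * for device error events. The handler name and the printed message are
 * assumptions for the example.
 */
static void __rte_unused
example_error_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
		void *cb_arg __rte_unused)
{
	if (event == RTE_CRYPTODEV_EVENT_ERROR)
		printf("crypto dev %u reported an error event\n", dev_id);
}

static int __rte_unused
example_register_error_cb(uint8_t dev_id)
{
	/* Returns 0 on success, -ENOMEM if the callback cannot be stored */
	return rte_cryptodev_callback_register(dev_id,
			RTE_CRYPTODEV_EVENT_ERROR,
			example_error_event_cb, NULL);
}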
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	sess->sess_data[index].refcnt++;
	return 0;
}
int __rte_experimental
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}
struct rte_mempool * __rte_experimental
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	return mp;
}
static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	return sess;
}
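
/*
 * Illustrative sketch, not part of the library: creating a session header
 * pool sized for all registered drivers, allocating a session from it and
 * binding it to one device. The pool name, pool sizes, AES-CBC transform
 * and all-zero key are assumptions for the example; sess_priv_pool is the
 * per-device private-data mempool also passed at queue pair setup.
 */
static struct rte_cryptodev_sym_session * __rte_unused
example_create_cipher_session(uint8_t dev_id,
		struct rte_mempool *sess_priv_pool)
{
	static uint8_t key[16]; /* placeholder key material */
	struct rte_cryptodev_sym_session *sess;
	struct rte_mempool *pool;
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = sizeof(key) },
			/* IV conventionally placed right after the op */
			.iv = {
				.offset = sizeof(struct rte_crypto_op) +
					sizeof(struct rte_crypto_sym_op),
				.length = 16,
			},
		},
	};

	pool = rte_cryptodev_sym_session_pool_create("example_sess_pool",
			1024, 0, 32, 0, rte_cryptodev_socket_id(dev_id));
	if (pool == NULL)
		return NULL;

	sess = rte_cryptodev_sym_session_create(pool);
	if (sess == NULL)
		return NULL;

	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
			sess_priv_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}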
struct rte_cryptodev_asym_session * __rte_experimental
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	return 0;
}
int __rte_experimental
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	return 0;
}
int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}
int __rte_experimental
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}
unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * The header contains pointers to the private data of all registered
	 * drivers and all the information necessary to safely clear or free
	 * a session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}
unsigned int __rte_experimental
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}
unsigned int __rte_experimental
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * The header contains pointers to the private data of all registered
	 * drivers, and a flag which indicates presence of private data.
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}
unsigned int __rte_experimental
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
int __rte_experimental
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void * __rte_experimental
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}
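
/*
 * Illustrative sketch, not part of the library: stashing an application
 * cookie in a session's user data area. This only works if the session
 * pool was created with a non-zero user_data_size; the cookie value and
 * type are assumptions.
 */
static int __rte_unused
example_session_cookie(struct rte_cryptodev_sym_session *sess)
{
	uint64_t cookie = 0xdeadbeef, *p;

	if (rte_cryptodev_sym_session_set_user_data(sess, &cookie,
			sizeof(cookie)) < 0)
		return -1; /* pool created with too small user_data_size */

	p = rte_cryptodev_sym_session_get_user_data(sess);
	return (p != NULL && *p == cookie) ? 0 : -1;
}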
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size <  priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;

	return mp;
}
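
/*
 * Illustrative sketch, not part of the library: creating a pool of
 * symmetric crypto operations and drawing one op from it. The pool name
 * and sizes are assumptions; rte_crypto_op_alloc() and
 * rte_crypto_op_free() come from rte_crypto.h, included above.
 */
static int __rte_unused
example_op_pool(void)
{
	struct rte_mempool *op_pool;
	struct rte_crypto_op *op;

	op_pool = rte_crypto_op_pool_create("example_op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
			rte_socket_id());
	if (op_pool == NULL)
		return -1;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -1;

	rte_crypto_op_free(op);
	return 0;
}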
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}
TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name)) == 0)
			return driver->id;
	}
	return -1;
}
const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}
uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}