1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2017 Intel Corporation
15 #include <netinet/in.h>
17 #include <rte_byteorder.h>
19 #include <rte_debug.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
44 static uint8_t nb_drivers;
46 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
48 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
50 static struct rte_cryptodev_global cryptodev_globals = {
51 .devs = rte_crypto_devices,
56 /* spinlock for crypto device callbacks */
57 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
61 * The user application callback description.
63  * It contains the callback address to be registered by the user application,
64  * the pointer to the callback's parameters, and the event type.
66 struct rte_cryptodev_callback {
67 TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
68 rte_cryptodev_cb_fn cb_fn; /**< Callback address */
69 void *cb_arg; /**< Parameter for callback */
70 enum rte_cryptodev_event_type event; /**< Interrupt event type */
71 uint32_t active; /**< Callback is executing */
75  * The crypto cipher algorithm string identifiers.
76  * They can be used in application command lines.
79 rte_crypto_cipher_algorithm_strings[] = {
80 [RTE_CRYPTO_CIPHER_3DES_CBC] = "3des-cbc",
81 [RTE_CRYPTO_CIPHER_3DES_ECB] = "3des-ecb",
82 [RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr",
84 [RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc",
85 [RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr",
86 [RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi",
87 [RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb",
88 [RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8",
89 [RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts",
91 [RTE_CRYPTO_CIPHER_ARC4] = "arc4",
93 [RTE_CRYPTO_CIPHER_DES_CBC] = "des-cbc",
94 [RTE_CRYPTO_CIPHER_DES_DOCSISBPI] = "des-docsisbpi",
96 [RTE_CRYPTO_CIPHER_NULL] = "null",
98 [RTE_CRYPTO_CIPHER_KASUMI_F8] = "kasumi-f8",
99 [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
100 [RTE_CRYPTO_CIPHER_ZUC_EEA3] = "zuc-eea3"
104  * The crypto cipher operation string identifiers.
105  * They can be used in application command lines.
108 rte_crypto_cipher_operation_strings[] = {
109 [RTE_CRYPTO_CIPHER_OP_ENCRYPT] = "encrypt",
110 [RTE_CRYPTO_CIPHER_OP_DECRYPT] = "decrypt"
114  * The crypto auth algorithm string identifiers.
115  * They can be used in application command lines.
118 rte_crypto_auth_algorithm_strings[] = {
119 [RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac",
120 [RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac",
121 [RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac",
122 [RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac",
124 [RTE_CRYPTO_AUTH_MD5] = "md5",
125 [RTE_CRYPTO_AUTH_MD5_HMAC] = "md5-hmac",
127 [RTE_CRYPTO_AUTH_NULL] = "null",
129 [RTE_CRYPTO_AUTH_SHA1] = "sha1",
130 [RTE_CRYPTO_AUTH_SHA1_HMAC] = "sha1-hmac",
132 [RTE_CRYPTO_AUTH_SHA224] = "sha2-224",
133 [RTE_CRYPTO_AUTH_SHA224_HMAC] = "sha2-224-hmac",
134 [RTE_CRYPTO_AUTH_SHA256] = "sha2-256",
135 [RTE_CRYPTO_AUTH_SHA256_HMAC] = "sha2-256-hmac",
136 [RTE_CRYPTO_AUTH_SHA384] = "sha2-384",
137 [RTE_CRYPTO_AUTH_SHA384_HMAC] = "sha2-384-hmac",
138 [RTE_CRYPTO_AUTH_SHA512] = "sha2-512",
139 [RTE_CRYPTO_AUTH_SHA512_HMAC] = "sha2-512-hmac",
141 [RTE_CRYPTO_AUTH_KASUMI_F9] = "kasumi-f9",
142 [RTE_CRYPTO_AUTH_SNOW3G_UIA2] = "snow3g-uia2",
143 [RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3"
147  * The crypto AEAD algorithm string identifiers.
148  * They can be used in application command lines.
151 rte_crypto_aead_algorithm_strings[] = {
152 [RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm",
153 [RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm",
154 [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
158  * The crypto AEAD operation string identifiers.
159  * They can be used in application command lines.
162 rte_crypto_aead_operation_strings[] = {
163 [RTE_CRYPTO_AEAD_OP_ENCRYPT] = "encrypt",
164 [RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt"
168  * Asymmetric crypto transform operation string identifiers.
170 const char *rte_crypto_asym_xform_strings[] = {
171 [RTE_CRYPTO_ASYM_XFORM_NONE] = "none",
172 [RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa",
173 [RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp",
174 [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv",
175 [RTE_CRYPTO_ASYM_XFORM_DH] = "dh",
176 [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa",
180  * Asymmetric crypto operation string identifiers.
182 const char *rte_crypto_asym_op_strings[] = {
183 [RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt",
184 [RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt",
185 [RTE_CRYPTO_ASYM_OP_SIGN] = "sign",
186 [RTE_CRYPTO_ASYM_OP_VERIFY] = "verify",
187 [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate",
188 [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
189 [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
193 * The private data structure stored in the session mempool private data.
195 struct rte_cryptodev_sym_session_pool_private_data {
196 	uint16_t nb_drivers;
197 	/**< number of elements in sess_data array */
198 	uint16_t user_data_sz;
199 	/**< session user data will be placed after sess_data */
203 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
204 const char *algo_string)
208 for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
209 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
210 *algo_enum = (enum rte_crypto_cipher_algorithm) i;
220 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
221 const char *algo_string)
225 for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
226 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
227 *algo_enum = (enum rte_crypto_auth_algorithm) i;
237 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
238 const char *algo_string)
242 for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
243 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
244 *algo_enum = (enum rte_crypto_aead_algorithm) i;
254 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
255 const char *xform_string)
259 for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
260 if (strcmp(xform_string,
261 rte_crypto_asym_xform_strings[i]) == 0) {
262 *xform_enum = (enum rte_crypto_asym_xform_type) i;
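/*
 * Illustrative usage sketch (not part of this file): a command-line parser in
 * an application can map user-supplied algorithm names onto enum values with
 * the helpers above. The wrapper name app_parse_cipher() is hypothetical.
 *
 *	static int
 *	app_parse_cipher(const char *name,
 *			enum rte_crypto_cipher_algorithm *algo)
 *	{
 *		if (rte_cryptodev_get_cipher_algo_enum(algo, name) < 0) {
 *			printf("unknown cipher algorithm '%s'\n", name);
 *			return -1;
 *		}
 *		return 0;
 *	}
 *
 * For example, app_parse_cipher("aes-cbc", &algo) sets algo to
 * RTE_CRYPTO_CIPHER_AES_CBC; the auth, AEAD and asymmetric helpers are used
 * the same way.
 */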
272  * The crypto auth operation string identifiers.
273  * They can be used in application command lines.
276 rte_crypto_auth_operation_strings[] = {
277 [RTE_CRYPTO_AUTH_OP_VERIFY] = "verify",
278 [RTE_CRYPTO_AUTH_OP_GENERATE] = "generate"
281 const struct rte_cryptodev_symmetric_capability *
282 rte_cryptodev_sym_capability_get(uint8_t dev_id,
283 const struct rte_cryptodev_sym_capability_idx *idx)
285 const struct rte_cryptodev_capabilities *capability;
286 struct rte_cryptodev_info dev_info;
289 rte_cryptodev_info_get(dev_id, &dev_info);
291 while ((capability = &dev_info.capabilities[i++])->op !=
292 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
293 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
296 if (capability->sym.xform_type != idx->type)
299 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
300 capability->sym.auth.algo == idx->algo.auth)
301 return &capability->sym;
303 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
304 capability->sym.cipher.algo == idx->algo.cipher)
305 return &capability->sym;
307 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
308 capability->sym.aead.algo == idx->algo.aead)
309 return &capability->sym;
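/*
 * Illustrative usage sketch (not part of this file): checking whether a
 * device supports AES-CBC with a 128-bit key and a 16-byte IV, using
 * rte_cryptodev_sym_capability_check_cipher() defined later in this file.
 * dev_id is a hypothetical application variable.
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		printf("AES-CBC/128 not supported on dev %u\n", dev_id);
 */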
317 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
319 unsigned int next_size;
321 /* Check lower/upper bounds */
322 if (size < range->min)
325 if (size > range->max)
328 /* If range is actually only one value, size is correct */
329 if (range->increment == 0)
332 /* Check if value is one of the supported sizes */
333 for (next_size = range->min; next_size <= range->max;
334 next_size += range->increment)
335 if (size == next_size)
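/*
 * Worked example for the size check above (illustrative values only): a
 * capability advertising key_size = {.min = 16, .max = 64, .increment = 16}
 * accepts exactly 16, 32, 48 and 64 bytes; an increment of 0 means any size
 * already inside [min, max] is accepted, which is how a range holding a
 * single value (min == max) is usually encoded.
 */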
341 const struct rte_cryptodev_asymmetric_xform_capability *
342 rte_cryptodev_asym_capability_get(uint8_t dev_id,
343 const struct rte_cryptodev_asym_capability_idx *idx)
345 const struct rte_cryptodev_capabilities *capability;
346 struct rte_cryptodev_info dev_info;
349 memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
350 rte_cryptodev_info_get(dev_id, &dev_info);
352 while ((capability = &dev_info.capabilities[i++])->op !=
353 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
354 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
357 if (capability->asym.xform_capa.xform_type == idx->type)
358 return &capability->asym.xform_capa;
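/*
 * Illustrative usage sketch (not part of this file): checking that a device
 * can sign with RSA and a 2048-bit (256-byte) modulus, using the optype and
 * modlen checks defined later in this file. dev_id is a hypothetical
 * application variable.
 *
 *	const struct rte_cryptodev_asymmetric_xform_capability *acap;
 *	struct rte_cryptodev_asym_capability_idx aidx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *
 *	acap = rte_cryptodev_asym_capability_get(dev_id, &aidx);
 *	if (acap == NULL ||
 *	    !rte_cryptodev_asym_xform_capability_check_optype(acap,
 *			RTE_CRYPTO_ASYM_OP_SIGN) ||
 *	    rte_cryptodev_asym_xform_capability_check_modlen(acap, 256) != 0)
 *		printf("RSA-2048 sign not supported on dev %u\n", dev_id);
 */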
364 rte_cryptodev_sym_capability_check_cipher(
365 const struct rte_cryptodev_symmetric_capability *capability,
366 uint16_t key_size, uint16_t iv_size)
368 if (param_range_check(key_size, &capability->cipher.key_size) != 0)
371 if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
378 rte_cryptodev_sym_capability_check_auth(
379 const struct rte_cryptodev_symmetric_capability *capability,
380 uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
382 if (param_range_check(key_size, &capability->auth.key_size) != 0)
385 if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
388 if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
395 rte_cryptodev_sym_capability_check_aead(
396 const struct rte_cryptodev_symmetric_capability *capability,
397 uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
400 if (param_range_check(key_size, &capability->aead.key_size) != 0)
403 if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
406 if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
409 if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
415 rte_cryptodev_asym_xform_capability_check_optype(
416 const struct rte_cryptodev_asymmetric_xform_capability *capability,
417 enum rte_crypto_asym_op_type op_type)
419 if (capability->op_types & (1 << op_type))
426 rte_cryptodev_asym_xform_capability_check_modlen(
427 const struct rte_cryptodev_asymmetric_xform_capability *capability,
430 	/* no need to check against a limit if min or max is 0 */
431 if (capability->modlen.min != 0) {
432 if (modlen < capability->modlen.min)
436 if (capability->modlen.max != 0) {
437 if (modlen > capability->modlen.max)
441 	/* in any case, check if the given modlen is a multiple of the increment */
442 if (capability->modlen.increment != 0) {
443 if (modlen % (capability->modlen.increment))
452 rte_cryptodev_get_feature_name(uint64_t flag)
455 case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
456 return "SYMMETRIC_CRYPTO";
457 case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
458 return "ASYMMETRIC_CRYPTO";
459 case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
460 return "SYM_OPERATION_CHAINING";
461 case RTE_CRYPTODEV_FF_CPU_SSE:
463 case RTE_CRYPTODEV_FF_CPU_AVX:
465 case RTE_CRYPTODEV_FF_CPU_AVX2:
467 case RTE_CRYPTODEV_FF_CPU_AVX512:
469 case RTE_CRYPTODEV_FF_CPU_AESNI:
471 case RTE_CRYPTODEV_FF_HW_ACCELERATED:
472 return "HW_ACCELERATED";
473 case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
474 return "IN_PLACE_SGL";
475 case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
476 return "OOP_SGL_IN_SGL_OUT";
477 case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
478 return "OOP_SGL_IN_LB_OUT";
479 case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
480 return "OOP_LB_IN_SGL_OUT";
481 case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
482 return "OOP_LB_IN_LB_OUT";
483 case RTE_CRYPTODEV_FF_CPU_NEON:
485 case RTE_CRYPTODEV_FF_CPU_ARM_CE:
487 case RTE_CRYPTODEV_FF_SECURITY:
488 return "SECURITY_PROTOCOL";
489 case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
490 return "RSA_PRIV_OP_KEY_EXP";
491 case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
492 return "RSA_PRIV_OP_KEY_QT";
493 case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
494 return "DIGEST_ENCRYPTED";
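/*
 * Illustrative usage sketch (not part of this file): since every feature
 * flag is a single bit, an application can print a device's capabilities by
 * walking the 64 bit positions. dev_id is a hypothetical variable.
 *
 *	struct rte_cryptodev_info info;
 *	const char *name;
 *	unsigned int i;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (i = 0; i < 64; i++) {
 *		uint64_t flag = UINT64_C(1) << i;
 *
 *		if ((info.feature_flags & flag) == 0)
 *			continue;
 *		name = rte_cryptodev_get_feature_name(flag);
 *		if (name != NULL)
 *			printf("  %s\n", name);
 *	}
 */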
500 struct rte_cryptodev *
501 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
503 return &cryptodev_globals.devs[dev_id];
506 struct rte_cryptodev *
507 rte_cryptodev_pmd_get_named_dev(const char *name)
509 struct rte_cryptodev *dev;
515 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
516 dev = &cryptodev_globals.devs[i];
518 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
519 (strcmp(dev->data->name, name) == 0))
526 static inline uint8_t
527 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
529 if (rte_crypto_devices[dev_id].data == NULL)
536 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
538 struct rte_cryptodev *dev = NULL;
540 if (!rte_cryptodev_is_valid_device_data(dev_id))
543 dev = rte_cryptodev_pmd_get_dev(dev_id);
544 if (dev->attached != RTE_CRYPTODEV_ATTACHED)
552 rte_cryptodev_get_dev_id(const char *name)
559 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
560 if (!rte_cryptodev_is_valid_device_data(i))
562 if ((strcmp(cryptodev_globals.devs[i].data->name, name)
564 (cryptodev_globals.devs[i].attached ==
565 RTE_CRYPTODEV_ATTACHED))
573 rte_cryptodev_count(void)
575 return cryptodev_globals.nb_devs;
579 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
581 uint8_t i, dev_count = 0;
583 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
584 if (cryptodev_globals.devs[i].driver_id == driver_id &&
585 cryptodev_globals.devs[i].attached ==
586 RTE_CRYPTODEV_ATTACHED)
593 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
596 uint8_t i, count = 0;
597 struct rte_cryptodev *devs = cryptodev_globals.devs;
599 for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
600 if (!rte_cryptodev_is_valid_device_data(i))
603 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
606 cmp = strncmp(devs[i].device->driver->name,
608 strlen(driver_name) + 1);
611 devices[count++] = devs[i].data->dev_id;
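/*
 * Illustrative usage sketch (not part of this file): collecting the ids of
 * all attached devices created by one PMD. The driver name "crypto_aesni_mb"
 * is only an example.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *			rte_cryptodev_socket_id(ids[i]));
 */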
619 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
621 if (rte_crypto_devices[dev_id].feature_flags &
622 RTE_CRYPTODEV_FF_SECURITY)
623 return rte_crypto_devices[dev_id].security_ctx;
629 rte_cryptodev_socket_id(uint8_t dev_id)
631 struct rte_cryptodev *dev;
633 if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
636 dev = rte_cryptodev_pmd_get_dev(dev_id);
638 return dev->data->socket_id;
642 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
645 char mz_name[RTE_MEMZONE_NAMESIZE];
646 const struct rte_memzone *mz;
649 /* generate memzone name */
650 n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
651 if (n >= (int)sizeof(mz_name))
654 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
655 mz = rte_memzone_reserve(mz_name,
656 sizeof(struct rte_cryptodev_data),
659 mz = rte_memzone_lookup(mz_name);
665 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
666 memset(*data, 0, sizeof(struct rte_cryptodev_data));
672 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
674 char mz_name[RTE_MEMZONE_NAMESIZE];
675 const struct rte_memzone *mz;
678 /* generate memzone name */
679 n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
680 if (n >= (int)sizeof(mz_name))
683 mz = rte_memzone_lookup(mz_name);
687 RTE_ASSERT(*data == mz->addr);
690 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
691 return rte_memzone_free(mz);
697 rte_cryptodev_find_free_device_index(void)
701 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
702 if (rte_crypto_devices[dev_id].attached ==
703 RTE_CRYPTODEV_DETACHED)
706 return RTE_CRYPTO_MAX_DEVS;
709 struct rte_cryptodev *
710 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
712 struct rte_cryptodev *cryptodev;
715 if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
716 CDEV_LOG_ERR("Crypto device with name %s already "
721 dev_id = rte_cryptodev_find_free_device_index();
722 if (dev_id == RTE_CRYPTO_MAX_DEVS) {
723 CDEV_LOG_ERR("Reached maximum number of crypto devices");
727 cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
729 if (cryptodev->data == NULL) {
730 struct rte_cryptodev_data **cryptodev_data =
731 &cryptodev_globals.data[dev_id];
733 int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
736 if (retval < 0 || *cryptodev_data == NULL)
739 cryptodev->data = *cryptodev_data;
741 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
742 strlcpy(cryptodev->data->name, name,
743 RTE_CRYPTODEV_NAME_MAX_LEN);
745 cryptodev->data->dev_id = dev_id;
746 cryptodev->data->socket_id = socket_id;
747 cryptodev->data->dev_started = 0;
750 /* init user callbacks */
751 TAILQ_INIT(&(cryptodev->link_intr_cbs));
753 cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
755 cryptodev_globals.nb_devs++;
762 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
767 if (cryptodev == NULL)
770 dev_id = cryptodev->data->dev_id;
772 /* Close device only if device operations have been set */
773 if (cryptodev->dev_ops) {
774 ret = rte_cryptodev_close(dev_id);
779 ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
783 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
784 cryptodev_globals.nb_devs--;
789 rte_cryptodev_queue_pair_count(uint8_t dev_id)
791 struct rte_cryptodev *dev;
793 dev = &rte_crypto_devices[dev_id];
794 return dev->data->nb_queue_pairs;
798 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
801 struct rte_cryptodev_info dev_info;
805 if ((dev == NULL) || (nb_qpairs < 1)) {
806 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
811 	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
812 nb_qpairs, dev->data->dev_id);
814 memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
816 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
817 (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
819 if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
820 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
821 nb_qpairs, dev->data->dev_id);
825 if (dev->data->queue_pairs == NULL) { /* first time configuration */
826 dev->data->queue_pairs = rte_zmalloc_socket(
827 "cryptodev->queue_pairs",
828 sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
829 RTE_CACHE_LINE_SIZE, socket_id);
831 if (dev->data->queue_pairs == NULL) {
832 dev->data->nb_queue_pairs = 0;
833 CDEV_LOG_ERR("failed to get memory for qp meta data, "
838 } else { /* re-configure */
840 uint16_t old_nb_queues = dev->data->nb_queue_pairs;
842 qp = dev->data->queue_pairs;
844 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
847 for (i = nb_qpairs; i < old_nb_queues; i++) {
848 ret = (*dev->dev_ops->queue_pair_release)(dev, i);
853 qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
854 RTE_CACHE_LINE_SIZE);
856 CDEV_LOG_ERR("failed to realloc qp meta data,"
857 " nb_queues %u", nb_qpairs);
861 if (nb_qpairs > old_nb_queues) {
862 uint16_t new_qs = nb_qpairs - old_nb_queues;
864 memset(qp + old_nb_queues, 0,
865 sizeof(qp[0]) * new_qs);
868 dev->data->queue_pairs = qp;
871 dev->data->nb_queue_pairs = nb_qpairs;
876 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
878 struct rte_cryptodev *dev;
881 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
882 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
886 dev = &rte_crypto_devices[dev_id];
888 if (dev->data->dev_started) {
890 "device %d must be stopped to allow configuration", dev_id);
894 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
896 /* Setup new number of queue pairs and reconfigure device. */
897 diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
900 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
905 return (*dev->dev_ops->dev_configure)(dev, config);
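/*
 * Illustrative bring-up sketch (not part of this file): the usual
 * configure -> queue_pair_setup -> start sequence. sess_mp and sess_priv_mp
 * are hypothetical, previously created session mempools (see
 * rte_cryptodev_sym_session_pool_create() and
 * rte_cryptodev_sym_get_private_session_size() later in this file).
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *	uint16_t qp_id;
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 *	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *				conf.socket_id) < 0)
 *			return -1;
 *	if (rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */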
910 rte_cryptodev_start(uint8_t dev_id)
912 struct rte_cryptodev *dev;
915 CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
917 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
918 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
922 dev = &rte_crypto_devices[dev_id];
924 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
926 if (dev->data->dev_started != 0) {
927 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
932 diag = (*dev->dev_ops->dev_start)(dev);
934 dev->data->dev_started = 1;
942 rte_cryptodev_stop(uint8_t dev_id)
944 struct rte_cryptodev *dev;
946 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
947 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
951 dev = &rte_crypto_devices[dev_id];
953 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
955 if (dev->data->dev_started == 0) {
956 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
961 (*dev->dev_ops->dev_stop)(dev);
962 dev->data->dev_started = 0;
966 rte_cryptodev_close(uint8_t dev_id)
968 struct rte_cryptodev *dev;
971 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
972 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
976 dev = &rte_crypto_devices[dev_id];
978 /* Device must be stopped before it can be closed */
979 if (dev->data->dev_started == 1) {
980 CDEV_LOG_ERR("Device %u must be stopped before closing",
985 /* We can't close the device if there are outstanding sessions in use */
986 if (dev->data->session_pool != NULL) {
987 if (!rte_mempool_full(dev->data->session_pool)) {
988 CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
989 "has sessions still in use, free "
990 "all sessions before calling close",
996 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
997 retval = (*dev->dev_ops->dev_close)(dev);
1006 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1007 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1010 struct rte_cryptodev *dev;
1012 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1013 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1017 dev = &rte_crypto_devices[dev_id];
1018 if (queue_pair_id >= dev->data->nb_queue_pairs) {
1019 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1024 CDEV_LOG_ERR("qp_conf cannot be NULL\n");
1028 if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1029 (!qp_conf->mp_session && qp_conf->mp_session_private)) {
1030 CDEV_LOG_ERR("Invalid mempools\n");
1034 if (qp_conf->mp_session) {
1035 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1036 uint32_t obj_size = qp_conf->mp_session->elt_size;
1037 uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1038 struct rte_cryptodev_sym_session s = {0};
1040 pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1041 if (!pool_priv || qp_conf->mp_session->private_data_size <
1042 sizeof(*pool_priv)) {
1043 CDEV_LOG_ERR("Invalid mempool\n");
1047 s.nb_drivers = pool_priv->nb_drivers;
1048 s.user_data_sz = pool_priv->user_data_sz;
1050 if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1051 obj_size) || (s.nb_drivers <= dev->driver_id) ||
1052 rte_cryptodev_sym_get_private_session_size(dev_id) >
1054 CDEV_LOG_ERR("Invalid mempool\n");
1059 if (dev->data->dev_started) {
1061 "device %d must be stopped to allow configuration", dev_id);
1065 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1067 return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1073 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1075 struct rte_cryptodev *dev;
1077 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1078 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1082 if (stats == NULL) {
1083 CDEV_LOG_ERR("Invalid stats ptr");
1087 dev = &rte_crypto_devices[dev_id];
1088 memset(stats, 0, sizeof(*stats));
1090 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1091 (*dev->dev_ops->stats_get)(dev, stats);
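/*
 * Illustrative usage sketch (not part of this file): reading and printing
 * the basic enqueue/dequeue counters of a device.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64
 *			" enq_err %" PRIu64 " deq_err %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count,
 *			stats.enqueue_err_count, stats.dequeue_err_count);
 */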
1096 rte_cryptodev_stats_reset(uint8_t dev_id)
1098 struct rte_cryptodev *dev;
1100 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1101 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1105 dev = &rte_crypto_devices[dev_id];
1107 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1108 (*dev->dev_ops->stats_reset)(dev);
1113 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1115 struct rte_cryptodev *dev;
1117 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1118 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1122 dev = &rte_crypto_devices[dev_id];
1124 memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1126 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1127 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1129 dev_info->driver_name = dev->device->driver->name;
1130 dev_info->device = dev->device;
1135 rte_cryptodev_callback_register(uint8_t dev_id,
1136 enum rte_cryptodev_event_type event,
1137 rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1139 struct rte_cryptodev *dev;
1140 struct rte_cryptodev_callback *user_cb;
1145 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1146 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1150 dev = &rte_crypto_devices[dev_id];
1151 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1153 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1154 if (user_cb->cb_fn == cb_fn &&
1155 user_cb->cb_arg == cb_arg &&
1156 user_cb->event == event) {
1161 /* create a new callback. */
1162 if (user_cb == NULL) {
1163 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1164 sizeof(struct rte_cryptodev_callback), 0);
1165 if (user_cb != NULL) {
1166 user_cb->cb_fn = cb_fn;
1167 user_cb->cb_arg = cb_arg;
1168 user_cb->event = event;
1169 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1173 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1174 return (user_cb == NULL) ? -ENOMEM : 0;
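/*
 * Illustrative usage sketch (not part of this file): registering a callback
 * that is invoked from rte_cryptodev_pmd_callback_process() when a PMD
 * raises an event. The handler name app_crypto_event_cb is hypothetical.
 *
 *	static void
 *	app_crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg __rte_unused)
 *	{
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("crypto dev %u reported an error\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			app_crypto_event_cb, NULL);
 */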
1178 rte_cryptodev_callback_unregister(uint8_t dev_id,
1179 enum rte_cryptodev_event_type event,
1180 rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1183 struct rte_cryptodev *dev;
1184 struct rte_cryptodev_callback *cb, *next;
1189 if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1190 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1194 dev = &rte_crypto_devices[dev_id];
1195 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1198 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1200 next = TAILQ_NEXT(cb, next);
1202 if (cb->cb_fn != cb_fn || cb->event != event ||
1203 (cb->cb_arg != (void *)-1 &&
1204 cb->cb_arg != cb_arg))
1208 	 * if this callback is not executing right now,
1209 	 * then remove it.
1210 	 */
1211 if (cb->active == 0) {
1212 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1219 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1224 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1225 enum rte_cryptodev_event_type event)
1227 struct rte_cryptodev_callback *cb_lst;
1228 struct rte_cryptodev_callback dev_cb;
1230 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1231 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1232 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1236 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1237 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1239 rte_spinlock_lock(&rte_cryptodev_cb_lock);
1242 rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1247 rte_cryptodev_sym_session_init(uint8_t dev_id,
1248 struct rte_cryptodev_sym_session *sess,
1249 struct rte_crypto_sym_xform *xforms,
1250 struct rte_mempool *mp)
1252 struct rte_cryptodev *dev;
1253 uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1258 dev = rte_cryptodev_pmd_get_dev(dev_id);
1260 if (sess == NULL || xforms == NULL || dev == NULL)
1263 if (mp->elt_size < sess_priv_sz)
1266 index = dev->driver_id;
1267 if (index >= sess->nb_drivers)
1270 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1272 if (sess->sess_data[index].refcnt == 0) {
1273 ret = dev->dev_ops->sym_session_configure(dev, xforms,
1277 "dev_id %d failed to configure session details",
1283 sess->sess_data[index].refcnt++;
1288 rte_cryptodev_asym_session_init(uint8_t dev_id,
1289 struct rte_cryptodev_asym_session *sess,
1290 struct rte_crypto_asym_xform *xforms,
1291 struct rte_mempool *mp)
1293 struct rte_cryptodev *dev;
1297 dev = rte_cryptodev_pmd_get_dev(dev_id);
1299 if (sess == NULL || xforms == NULL || dev == NULL)
1302 index = dev->driver_id;
1304 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1307 if (sess->sess_private_data[index] == NULL) {
1308 ret = dev->dev_ops->asym_session_configure(dev,
1313 "dev_id %d failed to configure session details",
1322 struct rte_mempool *
1323 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1324 uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1327 struct rte_mempool *mp;
1328 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1331 obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1332 if (obj_sz > elt_size)
1333 CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1338 mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1339 (uint32_t)(sizeof(*pool_priv)),
1340 NULL, NULL, NULL, NULL,
1343 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1344 __func__, name, rte_errno);
1348 pool_priv = rte_mempool_get_priv(mp);
1350 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1352 rte_mempool_free(mp);
1356 pool_priv->nb_drivers = nb_drivers;
1357 pool_priv->user_data_sz = user_data_size;
1363 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1365 return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1369 struct rte_cryptodev_sym_session *
1370 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1372 struct rte_cryptodev_sym_session *sess;
1373 struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1376 CDEV_LOG_ERR("Invalid mempool\n");
1380 pool_priv = rte_mempool_get_priv(mp);
1382 if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
1383 CDEV_LOG_ERR("Invalid mempool\n");
1387 /* Allocate a session structure from the session pool */
1388 if (rte_mempool_get(mp, (void **)&sess)) {
1389 CDEV_LOG_ERR("couldn't get object from session mempool");
1393 sess->nb_drivers = pool_priv->nb_drivers;
1394 sess->user_data_sz = pool_priv->user_data_sz;
1395 sess->opaque_data = 0;
1397 /* Clear device session pointer.
1398 * Include the flag indicating presence of user data
1400 memset(sess->sess_data, 0,
1401 rte_cryptodev_sym_session_data_size(sess));
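/*
 * Illustrative lifecycle sketch (not part of this file): creating the two
 * session mempools, allocating a session and binding it to a device. The
 * pool names, sizes and cache values are hypothetical; xform is assumed to
 * have been filled by the application with its cipher/auth/AEAD parameters.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *	struct rte_mempool *sess_mp, *sess_priv_mp;
 *	struct rte_crypto_sym_xform xform = { 0 };
 *
 *	sess_mp = rte_cryptodev_sym_session_pool_create("sess", 1024,
 *			rte_cryptodev_sym_get_header_session_size(), 64, 0,
 *			rte_socket_id());
 *	sess_priv_mp = rte_mempool_create("sess_priv", 1024,
 *			rte_cryptodev_sym_get_private_session_size(dev_id),
 *			64, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			sess_priv_mp) < 0)
 *		return -1;
 *
 *	(enqueue/dequeue operations referencing sess, then)
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */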
1406 struct rte_cryptodev_asym_session *
1407 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1409 struct rte_cryptodev_asym_session *sess;
1411 /* Allocate a session structure from the session pool */
1412 if (rte_mempool_get(mp, (void **)&sess)) {
1413 CDEV_LOG_ERR("couldn't get object from session mempool");
1417 /* Clear device session pointer.
1418 * Include the flag indicating presence of private data
1420 memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1426 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1427 struct rte_cryptodev_sym_session *sess)
1429 struct rte_cryptodev *dev;
1432 dev = rte_cryptodev_pmd_get_dev(dev_id);
1434 if (dev == NULL || sess == NULL)
1437 driver_id = dev->driver_id;
1438 if (sess->sess_data[driver_id].refcnt == 0)
1440 if (--sess->sess_data[driver_id].refcnt != 0)
1443 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1445 dev->dev_ops->sym_session_clear(dev, sess);
1451 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1452 struct rte_cryptodev_asym_session *sess)
1454 struct rte_cryptodev *dev;
1456 dev = rte_cryptodev_pmd_get_dev(dev_id);
1458 if (dev == NULL || sess == NULL)
1461 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1463 dev->dev_ops->asym_session_clear(dev, sess);
1469 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1472 struct rte_mempool *sess_mp;
1477 /* Check that all device private data has been freed */
1478 for (i = 0; i < sess->nb_drivers; i++) {
1479 if (sess->sess_data[i].refcnt != 0)
1483 /* Return session to mempool */
1484 sess_mp = rte_mempool_from_obj(sess);
1485 rte_mempool_put(sess_mp, sess);
1491 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
1495 struct rte_mempool *sess_mp;
1500 /* Check that all device private data has been freed */
1501 for (i = 0; i < nb_drivers; i++) {
1502 sess_priv = get_asym_session_private_data(sess, i);
1503 if (sess_priv != NULL)
1507 /* Return session to mempool */
1508 sess_mp = rte_mempool_from_obj(sess);
1509 rte_mempool_put(sess_mp, sess);
1515 rte_cryptodev_sym_get_header_session_size(void)
1518 	 * The header contains pointers to the private data of all registered
1519 	 * drivers and all information needed to safely clear
1520 	 * or free a session.
1522 struct rte_cryptodev_sym_session s = {0};
1524 s.nb_drivers = nb_drivers;
1526 return (unsigned int)(sizeof(s) +
1527 rte_cryptodev_sym_session_data_size(&s));
1531 rte_cryptodev_sym_get_existing_header_session_size(
1532 struct rte_cryptodev_sym_session *sess)
1537 return (unsigned int)(sizeof(*sess) +
1538 rte_cryptodev_sym_session_data_size(sess));
1542 rte_cryptodev_asym_get_header_session_size(void)
1545 	 * The header contains pointers to the private data
1546 	 * of all registered drivers, and a flag which
1547 	 * indicates the presence of private data.
1549 return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
1553 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
1555 struct rte_cryptodev *dev;
1556 unsigned int priv_sess_size;
1558 if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1561 dev = rte_cryptodev_pmd_get_dev(dev_id);
1563 if (*dev->dev_ops->sym_session_get_size == NULL)
1566 priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
1568 return priv_sess_size;
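/*
 * Illustrative sizing sketch (not part of this file): when one private
 * session mempool is shared by several devices, its element size must cover
 * the largest private session size among them. Device ids 0..n-1 are assumed
 * here to be the attached devices.
 *
 *	uint32_t max_sz = 0, sz;
 *	uint8_t d, n = rte_cryptodev_count();
 *
 *	for (d = 0; d < n; d++) {
 *		sz = rte_cryptodev_sym_get_private_session_size(d);
 *		if (sz > max_sz)
 *			max_sz = sz;
 *	}
 *	(use max_sz as the element size of the shared private session mempool)
 */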
1572 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
1574 struct rte_cryptodev *dev;
1575 unsigned int header_size = sizeof(void *) * nb_drivers;
1576 unsigned int priv_sess_size;
1578 if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1581 dev = rte_cryptodev_pmd_get_dev(dev_id);
1583 if (*dev->dev_ops->asym_session_get_size == NULL)
1586 priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
1587 if (priv_sess_size < header_size)
1590 return priv_sess_size;
1595 rte_cryptodev_sym_session_set_user_data(
1596 struct rte_cryptodev_sym_session *sess,
1603 if (sess->user_data_sz < size)
1606 rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
1611 rte_cryptodev_sym_session_get_user_data(
1612 struct rte_cryptodev_sym_session *sess)
1614 if (sess == NULL || sess->user_data_sz == 0)
1617 return (void *)(sess->sess_data + sess->nb_drivers);
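/*
 * Illustrative usage sketch (not part of this file): attaching per-session
 * application data. The session mempool must have been created with a
 * non-zero user_data_size (see rte_cryptodev_sym_session_pool_create()
 * above); the struct app_ctx type is hypothetical.
 *
 *	struct app_ctx ctx = { .flow_id = 42 };
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) != 0)
 *		return -1;
 *	p = rte_cryptodev_sym_session_get_user_data(sess);
 */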
1620 /** Initialise rte_crypto_op mempool element */
1622 rte_crypto_op_init(struct rte_mempool *mempool,
1625 __rte_unused unsigned i)
1627 struct rte_crypto_op *op = _op_data;
1628 enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
1630 memset(_op_data, 0, mempool->elt_size);
1632 __rte_crypto_op_reset(op, type);
1634 op->phys_addr = rte_mem_virt2iova(_op_data);
1635 op->mempool = mempool;
1639 struct rte_mempool *
1640 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
1641 unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
1644 struct rte_crypto_op_pool_private *priv;
1646 unsigned elt_size = sizeof(struct rte_crypto_op) +
1649 if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
1650 elt_size += sizeof(struct rte_crypto_sym_op);
1651 } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
1652 elt_size += sizeof(struct rte_crypto_asym_op);
1653 } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1654 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
1655 sizeof(struct rte_crypto_asym_op));
1657 CDEV_LOG_ERR("Invalid op_type\n");
1661 /* lookup mempool in case already allocated */
1662 struct rte_mempool *mp = rte_mempool_lookup(name);
1665 priv = (struct rte_crypto_op_pool_private *)
1666 rte_mempool_get_priv(mp);
1668 if (mp->elt_size != elt_size ||
1669 mp->cache_size < cache_size ||
1670 mp->size < nb_elts ||
1671 priv->priv_size < priv_size) {
1673 CDEV_LOG_ERR("Mempool %s already exists but with "
1674 "incompatible parameters", name);
1680 mp = rte_mempool_create(
1685 sizeof(struct rte_crypto_op_pool_private),
1694 CDEV_LOG_ERR("Failed to create mempool %s", name);
1698 priv = (struct rte_crypto_op_pool_private *)
1699 rte_mempool_get_priv(mp);
1701 priv->priv_size = priv_size;
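/*
 * Illustrative usage sketch (not part of this file): creating an operation
 * pool and allocating a symmetric op from it. Pool size, cache size and the
 * zero-byte private area are arbitrary example values.
 *
 *	struct rte_mempool *op_mp;
 *	struct rte_crypto_op *op;
 *
 *	op_mp = rte_crypto_op_pool_create("crypto_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	if (op_mp == NULL)
 *		return -1;
 *
 *	op = rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *	if (op == NULL)
 *		return -1;
 *	(fill op->sym, attach a session, then submit with
 *	 rte_cryptodev_enqueue_burst() and reap with
 *	 rte_cryptodev_dequeue_burst())
 */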
1708 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
1710 struct rte_cryptodev *dev = NULL;
1716 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
1717 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
1718 "%s_%u", dev_name_prefix, i);
1723 dev = rte_cryptodev_pmd_get_named_dev(name);
1731 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
1733 static struct cryptodev_driver_list cryptodev_driver_list =
1734 TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
1737 rte_cryptodev_driver_id_get(const char *name)
1739 struct cryptodev_driver *driver;
1740 const char *driver_name;
1743 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
1747 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
1748 driver_name = driver->driver->name;
1749 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
1756 rte_cryptodev_name_get(uint8_t dev_id)
1758 struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);
1763 return dev->data->name;
1767 rte_cryptodev_driver_name_get(uint8_t driver_id)
1769 struct cryptodev_driver *driver;
1771 TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
1772 if (driver->id == driver_id)
1773 return driver->driver->name;
1778 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
1779 const struct rte_driver *drv)
1781 crypto_drv->driver = drv;
1782 crypto_drv->id = nb_drivers;
1784 TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
1786 return nb_drivers++;
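/*
 * Note for PMD authors (illustrative, not part of this file): crypto PMDs
 * normally do not call rte_cryptodev_allocate_driver() directly but register
 * through the RTE_PMD_REGISTER_CRYPTO_DRIVER() macro from the cryptodev
 * headers, which invokes this allocation from an init-time constructor and
 * stores the returned driver id in the PMD's driver-id variable.
 */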