/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
	.devs = rte_crypto_devices,
	.data = { NULL },
	.nb_devs = 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for that callback,
 * and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn; /**< Callback address */
	void *cb_arg; /**< Parameter for callback */
	enum rte_cryptodev_event_type event; /**< Interrupt event type */
	uint32_t active; /**< Callback is executing */
};
/**
 * The crypto cipher algorithm string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC] = "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB] = "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4] = "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC] = "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI] = "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL] = "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8] = "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3] = "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT] = "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT] = "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5] = "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC] = "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL] = "null",

	[RTE_CRYPTO_AUTH_SHA1] = "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC] = "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224] = "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC] = "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256] = "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC] = "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384] = "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC] = "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512] = "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC] = "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9] = "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2] = "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm",
};

/**
 * The crypto AEAD operation string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT] = "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt"
};

/**
 * Asymmetric crypto transform operation string identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE] = "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH] = "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN] = "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY] = "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};
/**
 * The private data structure stored in the private data area of the
 * sym session mempool.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
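/*
 * Usage sketch (illustrative only, not part of the library): mapping a
 * command-line string such as "aes-cbc" to its enum value with the getter
 * above. The example_ function name is hypothetical.
 */
static __rte_unused int
example_parse_cipher_algo(const char *opt_str)
{
	enum rte_crypto_cipher_algorithm algo;

	/* Returns 0 and fills 'algo' when opt_str matches a known name */
	if (rte_cryptodev_get_cipher_algo_enum(&algo, opt_str) < 0)
		return -EINVAL;

	return (int)algo;
}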
/**
 * The crypto auth operation string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY] = "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE] = "generate"
};
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
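/*
 * Worked example for param_range_check() below (informational): with a
 * range of {min = 16, max = 32, increment = 8}, sizes 16, 24 and 32 pass
 * and everything else fails; with increment == 0 the bounds check alone
 * decides, which covers the common single-value case where min == max.
 */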
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}

	return NULL;
}
int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
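/*
 * Usage sketch (illustrative only): query a device's AES-CBC capability
 * and validate a 16-byte key and IV against the advertised ranges. The
 * example_ function name is hypothetical and dev_id is assumed valid.
 */
static __rte_unused int
example_check_aes_cbc_support(uint8_t dev_id)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx cap_idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
	if (cap == NULL)
		return 0; /* algorithm not supported at all */

	/* check_cipher() returns 0 when both sizes are in range */
	return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0;
}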
int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	/* no need to check against the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that the given modlen is a multiple of the
	 * increment
	 */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	default:
		return NULL;
	}
}
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}
int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
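/*
 * Usage sketch (illustrative only): enumerate every attached device bound
 * to one driver, e.g. "crypto_aesni_mb". The example_ function name is
 * hypothetical.
 */
static __rte_unused void
example_list_driver_devices(const char *driver_name)
{
	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
	uint8_t n, i;

	n = rte_cryptodev_devices_get(driver_name, ids, RTE_DIM(ids));
	for (i = 0; i < n; i++)
		printf("dev %u: %s\n", ids[i],
				rte_cryptodev_name_get(ids[i]));
}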
void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_memzone_free(mz);

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already allocated!",
				name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}
uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}
int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
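/*
 * Usage sketch (illustrative only): the canonical configure -> queue pair
 * setup -> start sequence. The descriptor count and the single queue pair
 * are arbitrary choices; the session mempools are assumed to have been
 * created beforehand (see rte_cryptodev_sym_session_pool_create() below).
 * The example_ function name is hypothetical.
 */
static __rte_unused int
example_device_bringup(uint8_t dev_id, struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool)
{
	struct rte_cryptodev_config conf = {
		.socket_id = rte_cryptodev_socket_id(dev_id),
		.nb_queue_pairs = 1,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = sess_pool,
		.mp_session_private = sess_priv_pool,
	};
	int ret;

	ret = rte_cryptodev_configure(dev_id, &conf);
	if (ret < 0)
		return ret;

	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
			conf.socket_id);
	if (ret < 0)
		return ret;

	return rte_cryptodev_start(dev_id);
}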
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
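/*
 * Usage sketch (illustrative only): registering a callback for device
 * error events. Both example_ names are hypothetical.
 */
static __rte_unused void
example_on_crypto_event(uint8_t dev_id, enum rte_cryptodev_event_type event,
		void *cb_arg)
{
	RTE_SET_USED(cb_arg);
	CDEV_LOG_ERR("event %d on crypto device %u", event, dev_id);
}

static __rte_unused int
example_register_error_cb(uint8_t dev_id)
{
	return rte_cryptodev_callback_register(dev_id,
			RTE_CRYPTODEV_EVENT_ERROR,
			example_on_crypto_event, NULL);
}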
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	sess->sess_data[index].refcnt++;
	return 0;
}

int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	return mp;
}
static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	return sess;
}
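/*
 * Usage sketch (illustrative only): the two-mempool session model used by
 * this API. A header session comes from a pool created with
 * rte_cryptodev_sym_session_pool_create(); the per-device private data
 * lives in a plain mempool sized from the device's reported requirement.
 * Pool names, sizes and the example_ function name are arbitrary and
 * hypothetical; the xform chain is assumed to be filled in by the caller.
 */
static __rte_unused struct rte_cryptodev_sym_session *
example_session_setup(uint8_t dev_id, struct rte_crypto_sym_xform *xform)
{
	struct rte_mempool *hdr_pool, *priv_pool;
	struct rte_cryptodev_sym_session *sess;
	unsigned int priv_sz =
		rte_cryptodev_sym_get_private_session_size(dev_id);

	if (priv_sz == 0)
		return NULL;

	/* Pool of session headers (elt_size 0 is auto-expanded above) */
	hdr_pool = rte_cryptodev_sym_session_pool_create("ex_sess_hdr",
			1024, 0, 64, 0, rte_cryptodev_socket_id(dev_id));
	/* Pool of per-device private session data */
	priv_pool = rte_mempool_create("ex_sess_priv", 1024, priv_sz, 64,
			0, NULL, NULL, NULL, NULL,
			rte_cryptodev_socket_id(dev_id), 0);
	if (hdr_pool == NULL || priv_pool == NULL)
		return NULL;

	sess = rte_cryptodev_sym_session_create(hdr_pool);
	if (sess == NULL)
		return NULL;

	if (rte_cryptodev_sym_session_init(dev_id, sess, xform,
			priv_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}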
struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}
unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all the information necessary to safely clear or
	 * free a session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
int
rte_cryptodev_sym_session_set_user_data(
		struct rte_cryptodev_sym_session *sess,
		void *data, uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
		struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}
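/*
 * Usage sketch (illustrative only): stashing an application pointer in the
 * session's user-data area and reading it back. This requires the session
 * pool to have been created with user_data_size >= sizeof(void *). The
 * example_ function name is hypothetical.
 */
static __rte_unused void *
example_session_user_data(struct rte_cryptodev_sym_session *sess, void *ctx)
{
	void **stored;

	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
			sizeof(ctx)) < 0)
		return NULL;

	/* ... later, e.g. after dequeue ... */
	stored = rte_cryptodev_sym_session_get_user_data(sess);
	return stored != NULL ? *stored : NULL;
}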
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
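/*
 * Usage sketch (illustrative only): creating a symmetric op pool and
 * drawing a burst of operations from it. The pool name and sizes are
 * arbitrary; the example_ function name is hypothetical.
 */
static __rte_unused int
example_op_pool_usage(int socket_id)
{
	struct rte_crypto_op *ops[32];
	struct rte_mempool *op_pool;

	op_pool = rte_crypto_op_pool_create("ex_op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
			socket_id);
	if (op_pool == NULL)
		return -ENOMEM;

	/* Allocates and resets 32 symmetric ops in one call (0 on failure) */
	if (rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			ops, 32) == 0)
		return -ENOMEM;

	/* ... enqueue/dequeue would happen here ... */

	rte_mempool_put_bulk(op_pool, (void **)ops, 32);
	return 0;
}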
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}