/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"

static uint8_t nb_drivers;

struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= &rte_crypto_devices[0],
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm"
};

/**
 * The crypto AEAD operation string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
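
/*
 * Illustrative sketch (not part of the original file): how an application
 * could map user-supplied command-line strings onto the transform enums
 * using the lookup helpers above. The algorithm names shown are example
 * values only.
 *
 *	enum rte_crypto_cipher_algorithm cipher;
 *	enum rte_crypto_auth_algorithm auth;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&cipher, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 *	if (rte_cryptodev_get_auth_algo_enum(&auth, "sha1-hmac") < 0)
 *		printf("unknown auth algorithm\n");
 */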

/**
 * The crypto auth operation string identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
				capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
				capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
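
/*
 * Illustrative sketch (not part of the original file): querying a device's
 * symmetric capabilities and validating cipher parameters before creating a
 * session. dev_id is assumed valid; the algorithm and the key/IV sizes are
 * example values only.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		printf("AES-CBC with 128-bit key not supported\n");
 */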

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
		return "MBUF_SCATTER_GATHER";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &rte_cryptodev_globals->devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
		dev = &rte_cryptodev_globals->devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (dev_id >= rte_cryptodev_globals->nb_devs)
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
		if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
				== 0) &&
				(rte_cryptodev_globals->devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return rte_cryptodev_globals->nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
		if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
			rte_cryptodev_globals->devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
	uint8_t max_devs = rte_cryptodev_globals->max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(cryptodev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
					" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);

	return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
}

int
rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);

	return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
		struct rte_mempool *session_pool)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id, session_pool);
}
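
/*
 * Illustrative sketch (not part of the original file): the expected call
 * sequence for bringing up a device with one queue pair. The descriptor
 * count is an example value and session_pool is assumed to have been
 * created beforehand.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			conf.socket_id, session_pool) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		printf("failed to initialise crypto device %u\n", dev_id);
 */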

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointers.
	 * Include the flag indicating presence of private data.
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}
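
/*
 * Illustrative sketch (not part of the original file): creating a session
 * and initialising it for one device with a single cipher transform. The
 * transform contents are example values only; key, IV_OFFSET, sess_mp and
 * sess_priv_mp are assumed to exist in the application.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			sess_priv_mp) != 0)
 *		printf("session setup failed\n");
 */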

int
rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	/* The API is optional, not returning an error if the driver
	 * does not support it.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);

	void *sess_priv = get_session_private_data(sess, dev->driver_id);

	if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
		CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
				dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}

int
rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	/* The API is optional, not returning an error if the driver
	 * does not support it.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);

	void *sess_priv = get_session_private_data(sess, dev->driver_id);

	if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
		CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
				dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	dev->dev_ops->session_clear(dev, sess);

	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

unsigned int
rte_cryptodev_get_header_session_size(void)
{
	return rte_cryptodev_sym_get_header_session_size();
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}
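
/*
 * Illustrative sketch (not part of the original file): sizing a session
 * mempool element so it can hold the session header plus the largest
 * per-device private data of the devices in use. Since the private-size
 * query never returns less than the header size, the maximum over all
 * devices is a safe element size. Variable names are example values only.
 *
 *	unsigned int elt_size = rte_cryptodev_sym_get_header_session_size();
 *	uint8_t dev_id;
 *
 *	for (dev_id = 0; dev_id < rte_cryptodev_count(); dev_id++) {
 *		unsigned int priv =
 *			rte_cryptodev_sym_get_private_session_size(dev_id);
 *		if (priv > elt_size)
 *			elt_size = priv;
 *	}
 *	(elt_size can then be used when creating the session mempool.)
 */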

unsigned int
rte_cryptodev_get_private_session_size(uint8_t dev_id)
{
	return rte_cryptodev_sym_get_private_session_size(dev_id);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->session_get_size)(dev);

	/*
	 * If size is less than session header size,
	 * return the latter, as this guarantees that
	 * sessionless operations will work
	 */
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}

int __rte_experimental
rte_cryptodev_sym_session_set_private_data(
		struct rte_cryptodev_sym_session *sess,
		void *data,
		uint16_t size)
{
	uint16_t off_set = sizeof(void *) * nb_drivers;
	uint8_t *private_data_present = (uint8_t *)sess + off_set;

	if (sess == NULL)
		return -EINVAL;

	*private_data_present = 1;
	off_set += sizeof(uint8_t);
	rte_memcpy((uint8_t *)sess + off_set, data, size);
	return 0;
}

void * __rte_experimental
rte_cryptodev_sym_session_get_private_data(
		struct rte_cryptodev_sym_session *sess)
{
	uint16_t off_set = sizeof(void *) * nb_drivers;
	uint8_t *private_data_present = (uint8_t *)sess + off_set;

	if (sess == NULL || !*private_data_present)
		return NULL;

	off_set += sizeof(uint8_t);
	return (uint8_t *)sess + off_set;
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op) +
			priv_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name, nb_elts, elt_size, cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL, NULL, rte_crypto_op_init, &type,
			socket_id, 0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
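
/*
 * Illustrative sketch (not part of the original file): creating an
 * operation pool and drawing a symmetric operation from it. The pool name,
 * element count, cache size and private-data length are example values
 * only.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *op;
 *
 *	if (op_pool == NULL)
 *		printf("op pool creation failed\n");
 *	else if ((op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC)) == NULL)
 *		printf("op pool is empty\n");
 */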

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name)) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}