/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
static uint8_t nb_drivers;

struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= &rte_crypto_devices[0],
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * They can be used in application command lines.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * They can be used in application command lines.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * They can be used in application command lines.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * They can be used in application command lines.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};

/**
 * The crypto AEAD operation string identifiers.
 * They can be used in application command lines.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation string identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE]	= "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE]	= "sharedsecret_compute",
};
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
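/*
 * Usage sketch (illustrative only, not part of the library): an application
 * translating a command-line option such as "--cipher-algo aes-cbc" into the
 * corresponding enum could call the helper above:
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		rte_exit(EXIT_FAILURE, "unknown cipher algorithm\n");
 */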
/**
 * The crypto auth operation string identifiers.
 * They can be used in application command lines.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
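/*
 * Usage sketch (illustrative only): before creating an AES-CBC session an
 * application might verify device support and parameter sizes, e.g.:
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) < 0)
 *		return -ENOTSUP; (128-bit key or 16-byte IV unsupported)
 */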
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	default:
		return NULL;
	}
}
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &rte_cryptodev_globals->devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
		dev = &rte_cryptodev_globals->devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}
unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (dev_id >= rte_cryptodev_globals->nb_devs)
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
		if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
				== 0) &&
				(rte_cryptodev_globals->devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return rte_cryptodev_globals->nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
		if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
			rte_cryptodev_globals->devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
	uint8_t max_devs = rte_cryptodev_globals->max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
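/*
 * Usage sketch (illustrative only; the driver name is just an example):
 * collect the identifiers of all attached devices bound to a given PMD:
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb;
 *
 *	nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 */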
void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY)
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
static int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}
static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(cryptodev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
					dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
					" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}
int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}
int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
		struct rte_mempool *session_pool)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id, session_pool);
}
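/*
 * Usage sketch (illustrative only): a typical bring-up sequence configures
 * the device, sets up each queue pair and then starts the device. Here
 * session_pool names an application-created mempool, used only for the
 * example:
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id(), session_pool) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */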
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
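/*
 * Usage sketch (illustrative only): the application supplies a function with
 * the rte_cryptodev_cb_fn prototype and registers it for a given event:
 *
 *	static void
 *	error_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		(react to the error condition reported by the PMD)
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			error_cb, NULL);
 */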
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
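/*
 * Note (added for clarity): the callback list is protected by
 * rte_cryptodev_cb_lock. While a callback is being invoked its "active" flag
 * is set and the lock is released, so an unregister attempt from another
 * thread returns -EAGAIN instead of freeing a callback that is still running.
 */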
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms, sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	return 0;
}
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointers.
	 * Include the flag indicating presence of user data
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}
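/*
 * Usage sketch (illustrative only): a symmetric session is first allocated
 * from a mempool and then initialised once per device that will use it.
 * session_pool, session_priv_pool and xform are application-created objects,
 * named only for the example:
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(session_pool);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			session_priv_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */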
struct rte_cryptodev_asym_session * __rte_experimental
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointers.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	return sess;
}
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	return 0;
}
int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_sym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}

int __rte_experimental
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	return 0;
}
unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of user data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int __rte_experimental
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	/*
	 * If size is less than session header size,
	 * return the latter, as this guarantees that
	 * sessionless operations will work
	 */
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
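/*
 * Sizing note (added for clarity): the mempool used for sessions must have an
 * element size large enough for both the session header and the per-device
 * private data, i.e. at least the maximum of the values returned by the two
 * helpers above (the private size is already clamped to be no smaller than
 * the header). A sketch, assuming dev_ids[] and nb_devs come from the
 * application:
 *
 *	unsigned int i;
 *	unsigned int elt_size = rte_cryptodev_sym_get_header_session_size();
 *
 *	for (i = 0; i < nb_devs; i++)
 *		elt_size = RTE_MAX(elt_size,
 *			rte_cryptodev_sym_get_private_session_size(dev_ids[i]));
 */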
unsigned int __rte_experimental
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
int __rte_experimental
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	uint16_t off_set = sizeof(void *) * nb_drivers;
	uint8_t *user_data_present = (uint8_t *)sess + off_set;

	if (sess == NULL)
		return -EINVAL;

	*user_data_present = 1;
	off_set += sizeof(uint8_t);
	rte_memcpy((uint8_t *)sess + off_set, data, size);
	return 0;
}
void * __rte_experimental
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	uint16_t off_set = sizeof(void *) * nb_drivers;
	uint8_t *user_data_present = (uint8_t *)sess + off_set;

	if (sess == NULL || !*user_data_present)
		return NULL;

	off_set += sizeof(uint8_t);
	return (uint8_t *)sess + off_set;
}
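/*
 * Usage sketch (illustrative only): provided the session mempool element was
 * created with room for user data after the driver-private pointers, an
 * application can stash a small context in the session and fetch it later.
 * struct app_ctx is a hypothetical application type used only here:
 *
 *	struct app_ctx ctx = { 0 };
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	...
 *	struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
 */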
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;

	return mp;
}
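/*
 * Usage sketch (illustrative only; pool name and sizes are examples): create
 * an operation pool and draw a burst of symmetric operations from it:
 *
 *	struct rte_mempool *op_pool;
 *	struct rte_crypto_op *ops[32];
 *
 *	op_pool = rte_crypto_op_pool_create("crypto_op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	if (op_pool == NULL ||
 *	    rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, 32) == 0)
 *		rte_exit(EXIT_FAILURE, "crypto op pool setup failed\n");
 */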
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name)) == 0)
			return driver->id;
	}
	return -1;
}
const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}
uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}
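/*
 * Note (added for clarity): PMDs do not normally call
 * rte_cryptodev_allocate_driver() directly; it is expected to run once per
 * driver at startup, typically from the RTE_PMD_REGISTER_CRYPTO_DRIVER()
 * constructor macro, so that each crypto driver receives a unique id that
 * sessions later use to index their per-driver private data.
 */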