/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_compat.h>
#include <rte_function_versioning.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"
static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

static const struct rte_cryptodev_capabilities
		cryptodev_undefined_capabilities[] = {
		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static struct rte_cryptodev_capabilities
		*capability_copy[RTE_CRYPTO_MAX_DEVS];
static uint8_t is_capability_checked[RTE_CRYPTO_MAX_DEVS];
/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};
/**
 * String identifiers for the crypto cipher algorithms.
 * They can be used in application command lines.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};
/**
 * String identifiers for the crypto cipher operations.
 * They can be used in application command lines.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};
/**
 * String identifiers for the crypto authentication algorithms.
 * They can be used in application command lines.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};
/**
 * String identifiers for the crypto AEAD algorithms.
 * They can be used in application command lines.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used in application command lines.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};
/**
 * String identifiers for the asymmetric crypto transform operations.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};
/**
 * The private data structure stored in the sym session mempool's private
 * data area.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
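/*
 * Example usage of the lookup helpers above (an illustrative sketch, e.g.
 * from a command-line parser; the string value is hypothetical):
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 */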
/**
 * String identifiers for the crypto authentication operations.
 * They can be used in application command lines.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
const struct rte_cryptodev_symmetric_capability __vsym *
rte_cryptodev_sym_capability_get_v20(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get_v20(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
VERSION_SYMBOL(rte_cryptodev_sym_capability_get, _v20, 20.0);

const struct rte_cryptodev_symmetric_capability __vsym *
rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
MAP_STATIC_SYMBOL(const struct rte_cryptodev_symmetric_capability *
		rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx),
		rte_cryptodev_sym_capability_get_v21);
BIND_DEFAULT_SYMBOL(rte_cryptodev_sym_capability_get, _v21, 21);
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
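/*
 * Example (illustrative): with the checks above, a range of
 * {.min = 16, .max = 32, .increment = 8} accepts the sizes 16, 24 and 32
 * only, while {.min = 16, .max = 16, .increment = 0} accepts exactly 16.
 */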
const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}

	return NULL;
}
int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
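/*
 * Example usage (an illustrative sketch; dev_id 0 and the sizes passed to
 * the AEAD check are hypothetical):
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(0, &cap_idx);
 *
 *	if (cap == NULL ||
 *			rte_cryptodev_sym_capability_check_aead(cap,
 *				16, 16, 16, 12) != 0)
 *		printf("AES-GCM with these sizes is not supported\n");
 */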
int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check for limits, if min or max = 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check if the given modlen is a multiple of the
	 * increment
	 */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	default:
		return NULL;
	}
}
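/*
 * Example usage (an illustrative sketch; "dev_info" is assumed to have
 * been filled in by rte_cryptodev_info_get()):
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1)
 *		if ((dev_info.feature_flags & flag) &&
 *				rte_cryptodev_get_feature_name(flag) != NULL)
 *			printf("%s ", rte_cryptodev_get_feature_name(flag));
 */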
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}
unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}
int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
static int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}
uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
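/*
 * Example usage (an illustrative sketch of the bring-up sequence; dev_id 0
 * and the queue pair count are hypothetical, and queue pairs are set up
 * with rte_cryptodev_queue_pair_setup() between the two calls):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *
 *	if (rte_cryptodev_configure(0, &conf) < 0 ||
 *			rte_cryptodev_start(0) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot start cryptodev 0\n");
 */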
void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}
int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (capability_copy[dev_id]) {
		free(capability_copy[dev_id]);
		capability_copy[dev_id] = NULL;
	}
	is_capability_checked[dev_id] = 0;

	if (retval < 0)
		return retval;

	return 0;
}
int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
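/*
 * Example usage (an illustrative sketch; "sess_mp" and "sess_priv_mp" are
 * hypothetical mempools created by the application, e.g. via
 * rte_cryptodev_sym_session_pool_create() and rte_mempool_create()):
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(0, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "queue pair setup failed\n");
 */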
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
static void
get_v20_capabilities(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	const struct rte_cryptodev_capabilities *capability;
	uint8_t found_invalid_capa = 0;
	uint8_t counter = 0;

	for (capability = dev_info->capabilities;
			capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
			++capability, ++counter) {
		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				capability->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AEAD
				&& capability->sym.aead.algo >=
				RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
			found_invalid_capa = 1;
			counter--;
		}
	}
	is_capability_checked[dev_id] = 1;
	if (!found_invalid_capa)
		return;
	capability_copy[dev_id] = malloc(counter *
		sizeof(struct rte_cryptodev_capabilities));
	if (capability_copy[dev_id] == NULL) {
		/*
		 * error case - no memory to store the trimmed
		 * list, so have to return an empty list
		 */
		dev_info->capabilities =
			cryptodev_undefined_capabilities;
		is_capability_checked[dev_id] = 0;
	} else {
		counter = 0;
		for (capability = dev_info->capabilities;
				capability->op !=
				RTE_CRYPTO_OP_TYPE_UNDEFINED;
				capability++) {
			if (!(capability->op ==
				RTE_CRYPTO_OP_TYPE_SYMMETRIC
				&& capability->sym.xform_type ==
				RTE_CRYPTO_SYM_XFORM_AEAD
				&& capability->sym.aead.algo >=
				RTE_CRYPTO_AEAD_CHACHA20_POLY1305)) {
				capability_copy[dev_id][counter++] =
						*capability;
			}
		}
		dev_info->capabilities =
				capability_copy[dev_id];
	}
}
void __vsym
rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	if (capability_copy[dev_id] == NULL) {
		if (!is_capability_checked[dev_id])
			get_v20_capabilities(dev_id, dev_info);
	} else
		dev_info->capabilities = capability_copy[dev_id];

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
VERSION_SYMBOL(rte_cryptodev_info_get, _v20, 20.0);

void __vsym
rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
MAP_STATIC_SYMBOL(void rte_cryptodev_info_get(uint8_t dev_id,
	struct rte_cryptodev_info *dev_info), rte_cryptodev_info_get_v21);
BIND_DEFAULT_SYMBOL(rte_cryptodev_info_get, _v21, 21);
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
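/*
 * Example usage (an illustrative sketch; "app_event_cb" is a hypothetical
 * handler defined by the application):
 *
 *	static void
 *	app_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		printf("event %d on dev %u\n", event, dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(0, RTE_CRYPTODEV_EVENT_ERROR,
 *			app_event_cb, NULL);
 */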
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}
int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
	return 0;
}
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
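/*
 * Example usage (an illustrative sketch; passing 0 as elt_size simply lets
 * the helper expand it to the computed header size, per the check above):
 *
 *	struct rte_mempool *sess_mp = rte_cryptodev_sym_session_pool_create(
 *			"sess_hdr_pool", 1024, 0, 64, 0, rte_socket_id());
 */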
static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
				+ pool_priv->user_data_sz)
		return 0;

	return 1;
}
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
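/*
 * Example usage (an illustrative sketch; "sess_mp" and "sess_priv_mp" are
 * hypothetical mempools and "xform" a fully initialised transform chain):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *			rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL ||
 *			rte_cryptodev_sym_session_init(0, sess, &xform,
 *					sess_priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */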
struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;
	unsigned int session_size =
			rte_cryptodev_asym_get_header_session_size();

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool\n");
		return NULL;
	}

	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_size) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, session_size);

	rte_cryptodev_trace_asym_session_create(mp, sess);
	return sess;
}
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
	return 0;
}
int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}
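/*
 * Example usage (an illustrative sketch of the teardown order: clear the
 * per-device private data first, then return the session header to its
 * mempool):
 *
 *	rte_cryptodev_sym_session_clear(0, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */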
int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(sess);
	return 0;
}
unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all necessary information to safely clear or free a
	 * session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
int
rte_cryptodev_sym_session_set_user_data(
		struct rte_cryptodev_sym_session *sess,
		void *data, uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
		struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}
static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;

	return mp;
}
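/*
 * Example usage (an illustrative sketch; the pool size, cache size and
 * private area size are hypothetical):
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_mp,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */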
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);
		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}