/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation string identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
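
/*
 * Usage sketch (illustrative only, not part of the library): resolving a
 * cipher name supplied on an application command line. The "optarg" string
 * is a hypothetical getopt() argument.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, optarg) < 0)
 *		rte_exit(EXIT_FAILURE, "unknown cipher: %s\n", optarg);
 */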

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * The crypto auth operation string identifiers.
 * They can be used in an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
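
/*
 * Usage sketch (illustrative only): querying device 0 for AES-CBC support
 * and verifying that a 16-byte key and 16-byte IV (AES-128-CBC) fall
 * within the advertised ranges. Device id 0 is an assumption.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(0, &idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		return -ENOTSUP;
 */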

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
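
/*
 * Worked example of the range semantics above: with min = 16, max = 32
 * and increment = 8, the accepted sizes are exactly 16, 24 and 32.
 * With increment = 0 the range denotes the single size min (== max).
 */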

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}

	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_memzone_free(mz);

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
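
/*
 * Usage sketch (illustrative only): a typical application-side configure
 * call preceding queue pair setup and rte_cryptodev_start(). The queue
 * pair count and socket placement are assumptions.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_queue_pairs = 2,
 *		.ff_disable = 0,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 */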

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
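
/*
 * Usage sketch (illustrative only): setting up queue pair 0 after
 * rte_cryptodev_configure(). The descriptor count and the mempools
 * ("sess_mp", "sess_priv_mp") are hypothetical.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_cryptodev_socket_id(dev_id)) < 0)
 *		return -1;
 */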

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
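
/*
 * Usage sketch (illustrative only): creating a session header pool sized
 * for 16 bytes of per-session user data. All numeric values are
 * assumptions.
 *
 *	struct rte_mempool *sess_mp =
 *		rte_cryptodev_sym_session_pool_create("sess_mp",
 *			2048,	(elements)
 *			0,	(let the API size each element)
 *			32,	(per-lcore cache)
 *			16,	(user data bytes per session)
 *			rte_socket_id());
 */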

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
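
/*
 * Usage sketch (illustrative only): the full lifecycle of a symmetric
 * session. "xform", "sess_mp" and "priv_mp" are hypothetical, dev_id is
 * assumed valid, and error handling is elided.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp);
 *	... enqueue/dequeue crypto ops referencing sess ...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */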

struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	rte_cryptodev_trace_asym_session_create(mp, sess);
	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return 0;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(sess);
	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers, and all the information needed to safely clear or free
	 * a session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;
	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
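
/*
 * Usage sketch (illustrative only): creating a pool of symmetric crypto
 * ops and drawing one op from it. Pool sizes are assumptions.
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("op_mp",
 *		RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *op =
 *		rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */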

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}
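
/*
 * Note: PMDs do not normally call rte_cryptodev_allocate_driver()
 * directly; they register through the RTE_PMD_REGISTER_CRYPTO_DRIVER()
 * constructor macro from rte_cryptodev_pmd.h, e.g. (sketch, names
 * hypothetical):
 *
 *	static struct cryptodev_driver my_crypto_drv;
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(my_crypto_drv, my_pmd.driver,
 *		my_driver_id);
 */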