/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */
#include <sys/types.h>
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"
static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback, and the
 * event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;	/**< Callback address */
	void *cb_arg;			/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;		/**< Callback is executing */
};
/**
 * The crypto cipher algorithm strings identifiers.
 * These strings can be used in application command lines.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation strings identifiers.
 * These strings can be used in application command lines.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm strings identifiers.
 * These strings can be used in application command lines.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * These strings can be used in application command lines.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};

/**
 * The crypto AEAD operation strings identifiers.
 * These strings can be used in application command lines.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation strings identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
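/*
 * Usage sketch (illustrative only, not part of the library): mapping a
 * command-line token such as "aes-cbc" back to its enum value.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 *	else
 *		printf("parsed %s\n",
 *			rte_crypto_cipher_algorithm_strings[algo]);
 */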
int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * The crypto auth operation strings identifiers.
 * These strings can be used in application command lines.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}

	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
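/*
 * Usage sketch (illustrative only): checking that a device supports
 * AES-CBC with a 128-bit key and 16-byte IV; dev_id is assumed to be a
 * valid device identifier.
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		printf("AES-CBC/128 not supported on device %u\n", dev_id);
 */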
int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check for limits, if min or max = 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check if the given modlen is a multiple of
	 * the increment
	 */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	default:
		return NULL;
	}
}
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_memzone_free(mz);

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}

	return RTE_CRYPTO_MAX_DEVS;
}
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
		}

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}
uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned int)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
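/*
 * Usage sketch (illustrative only): typical bring-up order enforced by
 * the checks above: configure while stopped, set up each queue pair,
 * then start. session_pool and session_priv_pool are hypothetical
 * application-created mempools, and the sizes are assumptions.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = (int)rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			(int)rte_socket_id()) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */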
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}
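/*
 * Usage sketch (illustrative only): periodic counter dump for a device.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64
 *			" deq_err %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count,
 *			stats.enqueue_err_count, stats.dequeue_err_count);
 */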
void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
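/*
 * Usage sketch (illustrative only): registering a handler for device
 * error events; example_event_cb is a hypothetical application function
 * matching the rte_cryptodev_cb_fn signature.
 *
 *	static void
 *	example_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		printf("cryptodev %u raised event %d\n", dev_id, (int)event);
 *	}
 *
 *	int ret = rte_cryptodev_callback_register(dev_id,
 *			RTE_CRYPTODEV_EVENT_ERROR, example_event_cb, NULL);
 */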
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
	return 0;
}
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
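/*
 * Usage sketch (illustrative only): creating the two mempools that
 * rte_cryptodev_queue_pair_setup() expects; pool names and sizes are
 * application-chosen assumptions. Passing elt_size 0 lets the header
 * pool be expanded to the minimum size computed above.
 *
 *	struct rte_mempool *session_pool =
 *		rte_cryptodev_sym_session_pool_create("sess_pool", 1024, 0,
 *			64, 16, rte_socket_id());
 *	struct rte_mempool *session_priv_pool = rte_mempool_create(
 *		"sess_priv_pool", 1024,
 *		rte_cryptodev_sym_get_private_session_size(dev_id),
 *		64, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 */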
static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
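/*
 * Usage sketch (illustrative only): allocating a session and binding it
 * to one device with rte_cryptodev_sym_session_init(); session_pool and
 * session_priv_pool are assumed to exist, and the xform is reduced to a
 * NULL cipher for brevity.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_NULL,
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(session_pool);
 *
 *	if (sess == NULL || rte_cryptodev_sym_session_init(dev_id, sess,
 *			&xform, session_priv_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */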
struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));

	rte_cryptodev_trace_asym_session_create(mp, sess);
	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}
int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
	return 0;
}
int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
	uint8_t i;
	void *sess_priv;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < nb_drivers; i++) {
		sess_priv = get_asym_session_private_data(sess, i);
		if (sess_priv != NULL)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(sess);
	return 0;
}
unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all the information needed to safely clear or free
	 * a session.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data
	 * of all registered drivers, and a flag which
	 * indicates presence of private data
	 */
	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int header_size = sizeof(void *) * nb_drivers;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
	if (priv_sess_size < header_size)
		return header_size;

	return priv_sess_size;
}
int
rte_cryptodev_sym_session_set_user_data(
					struct rte_cryptodev_sym_session *sess,
					void *data,
					uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
					struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}
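/*
 * Usage sketch (illustrative only): stashing application context in the
 * session user-data area; example_ctx is hypothetical and the pool must
 * have been created with user_data_size >= sizeof(ctx).
 *
 *	struct example_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct example_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0) {
 *		p = rte_cryptodev_sym_session_get_user_data(sess);
 *		printf("flow %u\n", p->flow_id);
 *	}
 */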
static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t priv_size, int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type\n");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;

	return mp;
}
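/*
 * Usage sketch (illustrative only): creating an op pool and cycling one
 * symmetric operation through it; sess is assumed to be an initialised
 * session.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *
 *	if (op != NULL) {
 *		rte_crypto_op_attach_sym_session(op, sess);
 *		rte_crypto_op_free(op);
 *	}
 */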
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}