X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_cryptodev%2Frte_cryptodev.c;h=e37b83afd2c477066d079da8aa9c8927334a1277;hb=a0f0de06d457753c94688d551a6e8659b4d4e041;hp=0ac23ed85586dbc561612d51f89fabd1ee769603;hpb=7c085e85c272c3e957d183ee30a8a168f1551416;p=dpdk.git diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c index 0ac23ed855..e37b83afd2 100644 --- a/lib/librte_cryptodev/rte_cryptodev.c +++ b/lib/librte_cryptodev/rte_cryptodev.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2015-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2020 Intel Corporation */ #include @@ -47,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -65,27 +36,37 @@ #include #include #include +#include +#include #include "rte_crypto.h" #include "rte_cryptodev.h" #include "rte_cryptodev_pmd.h" +#include "rte_cryptodev_trace.h" -struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS]; +static uint8_t nb_drivers; -struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0]; +static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS]; + +struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices; static struct rte_cryptodev_global cryptodev_globals = { - .devs = &rte_crypto_devices[0], + .devs = rte_crypto_devices, .data = { NULL }, - .nb_devs = 0, - .max_devs = RTE_CRYPTO_MAX_DEVS + .nb_devs = 0 }; -struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals; - /* spinlock for crypto device callbacks */ static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER; +static const struct rte_cryptodev_capabilities + cryptodev_undefined_capabilities[] = { + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static struct rte_cryptodev_capabilities + *capability_copy[RTE_CRYPTO_MAX_DEVS]; +static uint8_t is_capability_checked[RTE_CRYPTO_MAX_DEVS]; /** * The user application callback description. 
@@ -101,18 +82,6 @@ struct rte_cryptodev_callback { uint32_t active; /**< Callback is executing */ }; -#define RTE_CRYPTODEV_VDEV_NAME ("name") -#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs") -#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions") -#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id") - -static const char *cryptodev_vdev_valid_params[] = { - RTE_CRYPTODEV_VDEV_NAME, - RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG, - RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG, - RTE_CRYPTODEV_VDEV_SOCKET_ID -}; - /** * The crypto cipher algorithm strings identifiers. * It could be used in application command line. @@ -124,11 +93,9 @@ rte_crypto_cipher_algorithm_strings[] = { [RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr", [RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc", - [RTE_CRYPTO_CIPHER_AES_CCM] = "aes-ccm", [RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr", [RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi", [RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb", - [RTE_CRYPTO_CIPHER_AES_GCM] = "aes-gcm", [RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8", [RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts", @@ -161,9 +128,7 @@ rte_crypto_cipher_operation_strings[] = { const char * rte_crypto_auth_algorithm_strings[] = { [RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac", - [RTE_CRYPTO_AUTH_AES_CCM] = "aes-ccm", [RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac", - [RTE_CRYPTO_AUTH_AES_GCM] = "aes-gcm", [RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac", [RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac", @@ -189,6 +154,64 @@ rte_crypto_auth_algorithm_strings[] = { [RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3" }; +/** + * The crypto AEAD algorithm strings identifiers. + * It could be used in application command line. + */ +const char * +rte_crypto_aead_algorithm_strings[] = { + [RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm", + [RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm", + [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305" +}; + +/** + * The crypto AEAD operation strings identifiers. + * It could be used in application command line. + */ +const char * +rte_crypto_aead_operation_strings[] = { + [RTE_CRYPTO_AEAD_OP_ENCRYPT] = "encrypt", + [RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt" +}; + +/** + * Asymmetric crypto transform operation strings identifiers. + */ +const char *rte_crypto_asym_xform_strings[] = { + [RTE_CRYPTO_ASYM_XFORM_NONE] = "none", + [RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa", + [RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp", + [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv", + [RTE_CRYPTO_ASYM_XFORM_DH] = "dh", + [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa", + [RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa", + [RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm", +}; + +/** + * Asymmetric crypto operation strings identifiers. + */ +const char *rte_crypto_asym_op_strings[] = { + [RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt", + [RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt", + [RTE_CRYPTO_ASYM_OP_SIGN] = "sign", + [RTE_CRYPTO_ASYM_OP_VERIFY] = "verify", + [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate", + [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate", + [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute", +}; + +/** + * The private data structure stored in the session mempool private data. 
+ */ +struct rte_cryptodev_sym_session_pool_private_data { + uint16_t nb_drivers; + /**< number of elements in sess_data array */ + uint16_t user_data_sz; + /**< session user data will be placed after sess_data */ +}; + int rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, const char *algo_string) @@ -223,123 +246,88 @@ rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum, return -1; } -/** - * The crypto auth operation strings identifiers. - * It could be used in application command line. - */ -const char * -rte_crypto_auth_operation_strings[] = { - [RTE_CRYPTO_AUTH_OP_VERIFY] = "verify", - [RTE_CRYPTO_AUTH_OP_GENERATE] = "generate" -}; - -static uint8_t -number_of_sockets(void) +int +rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, + const char *algo_string) { - int sockets = 0; - int i; - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + unsigned int i; - for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) { - if (sockets < ms[i].socket_id) - sockets = ms[i].socket_id; + for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) { + if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) { + *algo_enum = (enum rte_crypto_aead_algorithm) i; + return 0; + } } - /* Number of sockets = maximum socket_id + 1 */ - return ++sockets; + /* Invalid string */ + return -1; } -/** Parse integer from integer argument */ -static int -parse_integer_arg(const char *key __rte_unused, - const char *value, void *extra_args) +int +rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, + const char *xform_string) { - int *i = (int *) extra_args; + unsigned int i; - *i = atoi(value); - if (*i < 0) { - CDEV_LOG_ERR("Argument has to be positive."); - return -1; + for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) { + if (strcmp(xform_string, + rte_crypto_asym_xform_strings[i]) == 0) { + *xform_enum = (enum rte_crypto_asym_xform_type) i; + return 0; + } } - return 0; + /* Invalid string */ + return -1; } -/** Parse name */ -static int -parse_name_arg(const char *key __rte_unused, - const char *value, void *extra_args) -{ - struct rte_crypto_vdev_init_params *params = extra_args; - - if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) { - CDEV_LOG_ERR("Invalid name %s, should be less than " - "%u bytes", value, - RTE_CRYPTODEV_NAME_MAX_LEN - 1); - return -1; - } - - strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN); - - return 0; -} +/** + * The crypto auth operation strings identifiers. + * It could be used in application command line. 
+ */ +const char * +rte_crypto_auth_operation_strings[] = { + [RTE_CRYPTO_AUTH_OP_VERIFY] = "verify", + [RTE_CRYPTO_AUTH_OP_GENERATE] = "generate" +}; -int -rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params, - const char *input_args) +const struct rte_cryptodev_symmetric_capability __vsym * +rte_cryptodev_sym_capability_get_v20(uint8_t dev_id, + const struct rte_cryptodev_sym_capability_idx *idx) { - struct rte_kvargs *kvlist = NULL; - int ret = 0; - - if (params == NULL) - return -EINVAL; + const struct rte_cryptodev_capabilities *capability; + struct rte_cryptodev_info dev_info; + int i = 0; - if (input_args) { - kvlist = rte_kvargs_parse(input_args, - cryptodev_vdev_valid_params); - if (kvlist == NULL) - return -1; + rte_cryptodev_info_get_v20(dev_id, &dev_info); - ret = rte_kvargs_process(kvlist, - RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG, - &parse_integer_arg, - ¶ms->max_nb_queue_pairs); - if (ret < 0) - goto free_kvlist; + while ((capability = &dev_info.capabilities[i++])->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; - ret = rte_kvargs_process(kvlist, - RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG, - &parse_integer_arg, - ¶ms->max_nb_sessions); - if (ret < 0) - goto free_kvlist; + if (capability->sym.xform_type != idx->type) + continue; - ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID, - &parse_integer_arg, - ¶ms->socket_id); - if (ret < 0) - goto free_kvlist; + if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH && + capability->sym.auth.algo == idx->algo.auth) + return &capability->sym; - ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME, - &parse_name_arg, - params); - if (ret < 0) - goto free_kvlist; + if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + capability->sym.cipher.algo == idx->algo.cipher) + return &capability->sym; - if (params->socket_id >= number_of_sockets()) { - CDEV_LOG_ERR("Invalid socket id specified to create " - "the virtual crypto device on"); - goto free_kvlist; - } + if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD && + capability->sym.aead.algo == idx->algo.aead) + return &capability->sym; } -free_kvlist: - rte_kvargs_free(kvlist); - return ret; + return NULL; } +VERSION_SYMBOL(rte_cryptodev_sym_capability_get, _v20, 20.0); -const struct rte_cryptodev_symmetric_capability * -rte_cryptodev_sym_capability_get(uint8_t dev_id, +const struct rte_cryptodev_symmetric_capability __vsym * +rte_cryptodev_sym_capability_get_v21(uint8_t dev_id, const struct rte_cryptodev_sym_capability_idx *idx) { const struct rte_cryptodev_capabilities *capability; @@ -363,25 +351,76 @@ rte_cryptodev_sym_capability_get(uint8_t dev_id, if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER && capability->sym.cipher.algo == idx->algo.cipher) return &capability->sym; + + if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD && + capability->sym.aead.algo == idx->algo.aead) + return &capability->sym; } return NULL; +} +MAP_STATIC_SYMBOL(const struct rte_cryptodev_symmetric_capability * + rte_cryptodev_sym_capability_get(uint8_t dev_id, + const struct rte_cryptodev_sym_capability_idx *idx), + rte_cryptodev_sym_capability_get_v21); +BIND_DEFAULT_SYMBOL(rte_cryptodev_sym_capability_get, _v21, 21); + +static int +param_range_check(uint16_t size, const struct rte_crypto_param_range *range) +{ + unsigned int next_size; + + /* Check lower/upper bounds */ + if (size < range->min) + return -1; + + if (size > range->max) + return -1; + + /* If range is actually only one value, size is correct */ + if (range->increment == 0) + return 0; + 
/* Check if value is one of the supported sizes */ + for (next_size = range->min; next_size <= range->max; + next_size += range->increment) + if (size == next_size) + return 0; + + return -1; } -#define param_range_check(x, y) \ - (((x < y.min) || (x > y.max)) || \ - (y.increment != 0 && (x % y.increment) != 0)) +const struct rte_cryptodev_asymmetric_xform_capability * +rte_cryptodev_asym_capability_get(uint8_t dev_id, + const struct rte_cryptodev_asym_capability_idx *idx) +{ + const struct rte_cryptodev_capabilities *capability; + struct rte_cryptodev_info dev_info; + unsigned int i = 0; + + memset(&dev_info, 0, sizeof(struct rte_cryptodev_info)); + rte_cryptodev_info_get(dev_id, &dev_info); + + while ((capability = &dev_info.capabilities[i++])->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC) + continue; + + if (capability->asym.xform_capa.xform_type == idx->type) + return &capability->asym.xform_capa; + } + return NULL; +}; int rte_cryptodev_sym_capability_check_cipher( const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t iv_size) { - if (param_range_check(key_size, capability->cipher.key_size)) + if (param_range_check(key_size, &capability->cipher.key_size) != 0) return -1; - if (param_range_check(iv_size, capability->cipher.iv_size)) + if (param_range_check(iv_size, &capability->cipher.iv_size) != 0) return -1; return 0; @@ -390,19 +429,75 @@ rte_cryptodev_sym_capability_check_cipher( int rte_cryptodev_sym_capability_check_auth( const struct rte_cryptodev_symmetric_capability *capability, - uint16_t key_size, uint16_t digest_size, uint16_t aad_size) + uint16_t key_size, uint16_t digest_size, uint16_t iv_size) +{ + if (param_range_check(key_size, &capability->auth.key_size) != 0) + return -1; + + if (param_range_check(digest_size, &capability->auth.digest_size) != 0) + return -1; + + if (param_range_check(iv_size, &capability->auth.iv_size) != 0) + return -1; + + return 0; +} + +int +rte_cryptodev_sym_capability_check_aead( + const struct rte_cryptodev_symmetric_capability *capability, + uint16_t key_size, uint16_t digest_size, uint16_t aad_size, + uint16_t iv_size) { - if (param_range_check(key_size, capability->auth.key_size)) + if (param_range_check(key_size, &capability->aead.key_size) != 0) + return -1; + + if (param_range_check(digest_size, &capability->aead.digest_size) != 0) return -1; - if (param_range_check(digest_size, capability->auth.digest_size)) + if (param_range_check(aad_size, &capability->aead.aad_size) != 0) return -1; - if (param_range_check(aad_size, capability->auth.aad_size)) + if (param_range_check(iv_size, &capability->aead.iv_size) != 0) return -1; return 0; } +int +rte_cryptodev_asym_xform_capability_check_optype( + const struct rte_cryptodev_asymmetric_xform_capability *capability, + enum rte_crypto_asym_op_type op_type) +{ + if (capability->op_types & (1 << op_type)) + return 1; + + return 0; +} + +int +rte_cryptodev_asym_xform_capability_check_modlen( + const struct rte_cryptodev_asymmetric_xform_capability *capability, + uint16_t modlen) +{ + /* no need to check for limits, if min or max = 0 */ + if (capability->modlen.min != 0) { + if (modlen < capability->modlen.min) + return -1; + } + + if (capability->modlen.max != 0) { + if (modlen > capability->modlen.max) + return -1; + } + + /* in any case, check if given modlen is module increment */ + if (capability->modlen.increment != 0) { + if (modlen % (capability->modlen.increment)) + return -1; + } + + return 0; +} const char 
* @@ -421,31 +516,51 @@ rte_cryptodev_get_feature_name(uint64_t flag) return "CPU_AVX"; case RTE_CRYPTODEV_FF_CPU_AVX2: return "CPU_AVX2"; + case RTE_CRYPTODEV_FF_CPU_AVX512: + return "CPU_AVX512"; case RTE_CRYPTODEV_FF_CPU_AESNI: return "CPU_AESNI"; case RTE_CRYPTODEV_FF_HW_ACCELERATED: return "HW_ACCELERATED"; - case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER: - return "MBUF_SCATTER_GATHER"; + case RTE_CRYPTODEV_FF_IN_PLACE_SGL: + return "IN_PLACE_SGL"; + case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT: + return "OOP_SGL_IN_SGL_OUT"; + case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT: + return "OOP_SGL_IN_LB_OUT"; + case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT: + return "OOP_LB_IN_SGL_OUT"; + case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT: + return "OOP_LB_IN_LB_OUT"; case RTE_CRYPTODEV_FF_CPU_NEON: return "CPU_NEON"; case RTE_CRYPTODEV_FF_CPU_ARM_CE: return "CPU_ARM_CE"; + case RTE_CRYPTODEV_FF_SECURITY: + return "SECURITY_PROTOCOL"; + case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP: + return "RSA_PRIV_OP_KEY_EXP"; + case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT: + return "RSA_PRIV_OP_KEY_QT"; + case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED: + return "DIGEST_ENCRYPTED"; + case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO: + return "SYM_CPU_CRYPTO"; + case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS: + return "ASYM_SESSIONLESS"; + case RTE_CRYPTODEV_FF_SYM_SESSIONLESS: + return "SYM_SESSIONLESS"; + case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA: + return "NON_BYTE_ALIGNED_DATA"; default: return NULL; } } -int -rte_cryptodev_create_vdev(const char *name, const char *args) -{ - return rte_eal_vdev_init(name, args); -} - struct rte_cryptodev * rte_cryptodev_pmd_get_dev(uint8_t dev_id) { - return &rte_cryptodev_globals->devs[dev_id]; + return &cryptodev_globals.devs[dev_id]; } struct rte_cryptodev * @@ -457,8 +572,8 @@ rte_cryptodev_pmd_get_named_dev(const char *name) if (name == NULL) return NULL; - for (i = 0; i < rte_cryptodev_globals->max_devs; i++) { - dev = &rte_cryptodev_globals->devs[i]; + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { + dev = &cryptodev_globals.devs[i]; if ((dev->attached == RTE_CRYPTODEV_ATTACHED) && (strcmp(dev->data->name, name) == 0)) @@ -468,12 +583,22 @@ rte_cryptodev_pmd_get_named_dev(const char *name) return NULL; } +static inline uint8_t +rte_cryptodev_is_valid_device_data(uint8_t dev_id) +{ + if (dev_id >= RTE_CRYPTO_MAX_DEVS || + rte_crypto_devices[dev_id].data == NULL) + return 0; + + return 1; +} + unsigned int rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id) { struct rte_cryptodev *dev = NULL; - if (dev_id >= rte_cryptodev_globals->nb_devs) + if (!rte_cryptodev_is_valid_device_data(dev_id)) return 0; dev = rte_cryptodev_pmd_get_dev(dev_id); @@ -492,12 +617,15 @@ rte_cryptodev_get_dev_id(const char *name) if (name == NULL) return -1; - for (i = 0; i < rte_cryptodev_globals->nb_devs; i++) - if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name) + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { + if (!rte_cryptodev_is_valid_device_data(i)) + continue; + if ((strcmp(cryptodev_globals.devs[i].data->name, name) == 0) && - (rte_cryptodev_globals->devs[i].attached == + (cryptodev_globals.devs[i].attached == RTE_CRYPTODEV_ATTACHED)) return i; + } return -1; } @@ -505,17 +633,17 @@ rte_cryptodev_get_dev_id(const char *name) uint8_t rte_cryptodev_count(void) { - return rte_cryptodev_globals->nb_devs; + return cryptodev_globals.nb_devs; } uint8_t -rte_cryptodev_count_devtype(enum rte_cryptodev_type type) +rte_cryptodev_device_count_by_driver(uint8_t driver_id) { uint8_t i, dev_count = 0; - for (i = 0; i < rte_cryptodev_globals->max_devs; i++) - 
if (rte_cryptodev_globals->devs[i].dev_type == type && - rte_cryptodev_globals->devs[i].attached == + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) + if (cryptodev_globals.devs[i].driver_id == driver_id && + cryptodev_globals.devs[i].attached == RTE_CRYPTODEV_ATTACHED) dev_count++; @@ -523,25 +651,22 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type) } uint8_t -rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices, +rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices) { uint8_t i, count = 0; - struct rte_cryptodev *devs = rte_cryptodev_globals->devs; - uint8_t max_devs = rte_cryptodev_globals->max_devs; + struct rte_cryptodev *devs = cryptodev_globals.devs; - for (i = 0; i < max_devs && count < nb_devices; i++) { + for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) { + if (!rte_cryptodev_is_valid_device_data(i)) + continue; if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) { - const struct rte_cryptodev_driver *drv = devs[i].driver; int cmp; - if (drv) - cmp = strncmp(drv->pci_drv.driver.name, - dev_name, strlen(dev_name)); - else - cmp = strncmp(devs[i].data->name, - dev_name, strlen(dev_name)); + cmp = strncmp(devs[i].device->driver->name, + driver_name, + strlen(driver_name) + 1); if (cmp == 0) devices[count++] = devs[i].data->dev_id; @@ -551,6 +676,17 @@ rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices, return count; } +void * +rte_cryptodev_get_sec_ctx(uint8_t dev_id) +{ + if (dev_id < RTE_CRYPTO_MAX_DEVS && + (rte_crypto_devices[dev_id].feature_flags & + RTE_CRYPTODEV_FF_SECURITY)) + return rte_crypto_devices[dev_id].security_ctx; + + return NULL; +} + int rte_cryptodev_socket_id(uint8_t dev_id) { @@ -568,7 +704,7 @@ static inline int rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, int socket_id) { - char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + char mz_name[RTE_MEMZONE_NAMESIZE]; const struct rte_memzone *mz; int n; @@ -594,6 +730,31 @@ rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, return 0; } +static inline int +rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + int n; + + /* generate memzone name */ + n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id); + if (n >= (int)sizeof(mz_name)) + return -EINVAL; + + mz = rte_memzone_lookup(mz_name); + if (mz == NULL) + return -ENOMEM; + + RTE_ASSERT(*data == mz->addr); + *data = NULL; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + return rte_memzone_free(mz); + + return 0; +} + static uint8_t rte_cryptodev_find_free_device_index(void) { @@ -628,23 +789,28 @@ rte_cryptodev_pmd_allocate(const char *name, int socket_id) cryptodev = rte_cryptodev_pmd_get_dev(dev_id); if (cryptodev->data == NULL) { - struct rte_cryptodev_data *cryptodev_data = - cryptodev_globals.data[dev_id]; + struct rte_cryptodev_data **cryptodev_data = + &cryptodev_globals.data[dev_id]; - int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data, + int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data, socket_id); - if (retval < 0 || cryptodev_data == NULL) + if (retval < 0 || *cryptodev_data == NULL) return NULL; - cryptodev->data = cryptodev_data; + cryptodev->data = *cryptodev_data; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + strlcpy(cryptodev->data->name, name, + RTE_CRYPTODEV_NAME_MAX_LEN); - snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN, - "%s", name); + cryptodev->data->dev_id 
= dev_id; + cryptodev->data->socket_id = socket_id; + cryptodev->data->dev_started = 0; + } - cryptodev->data->dev_id = dev_id; - cryptodev->data->socket_id = socket_id; - cryptodev->data->dev_started = 0; + /* init user callbacks */ + TAILQ_INIT(&(cryptodev->link_intr_cbs)); cryptodev->attached = RTE_CRYPTODEV_ATTACHED; @@ -658,11 +824,21 @@ int rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev) { int ret; + uint8_t dev_id; if (cryptodev == NULL) return -EINVAL; - ret = rte_cryptodev_close(cryptodev->data->dev_id); + dev_id = cryptodev->data->dev_id; + + /* Close device only if device operations have been set */ + if (cryptodev->dev_ops) { + ret = rte_cryptodev_close(dev_id); + if (ret < 0) + return ret; + } + + ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]); if (ret < 0) return ret; @@ -671,168 +847,41 @@ rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev) return 0; } -struct rte_cryptodev * -rte_cryptodev_pmd_virtual_dev_init(const char *name, size_t dev_private_size, - int socket_id) +uint16_t +rte_cryptodev_queue_pair_count(uint8_t dev_id) { - struct rte_cryptodev *cryptodev; - - /* allocate device structure */ - cryptodev = rte_cryptodev_pmd_allocate(name, socket_id); - if (cryptodev == NULL) - return NULL; - - /* allocate private device structure */ - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - cryptodev->data->dev_private = - rte_zmalloc_socket("cryptodev device private", - dev_private_size, - RTE_CACHE_LINE_SIZE, - socket_id); + struct rte_cryptodev *dev; - if (cryptodev->data->dev_private == NULL) - rte_panic("Cannot allocate memzone for private device" - " data"); + if (!rte_cryptodev_is_valid_device_data(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return 0; } - /* initialise user call-back tail queue */ - TAILQ_INIT(&(cryptodev->link_intr_cbs)); - - return cryptodev; + dev = &rte_crypto_devices[dev_id]; + return dev->data->nb_queue_pairs; } -int -rte_cryptodev_pci_probe(struct rte_pci_driver *pci_drv, - struct rte_pci_device *pci_dev) +static int +rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs, + int socket_id) { - struct rte_cryptodev_driver *cryptodrv; - struct rte_cryptodev *cryptodev; + struct rte_cryptodev_info dev_info; + void **qp; + unsigned i; - char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + if ((dev == NULL) || (nb_qpairs < 1)) { + CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u", + dev, nb_qpairs); + return -EINVAL; + } - int retval; + CDEV_LOG_DEBUG("Setup %d queues pairs on device %u", + nb_qpairs, dev->data->dev_id); - cryptodrv = (struct rte_cryptodev_driver *)pci_drv; - if (cryptodrv == NULL) - return -ENODEV; + memset(&dev_info, 0, sizeof(struct rte_cryptodev_info)); - rte_eal_pci_device_name(&pci_dev->addr, cryptodev_name, - sizeof(cryptodev_name)); - - cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); - if (cryptodev == NULL) - return -ENOMEM; - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - cryptodev->data->dev_private = - rte_zmalloc_socket( - "cryptodev private structure", - cryptodrv->dev_private_size, - RTE_CACHE_LINE_SIZE, - rte_socket_id()); - - if (cryptodev->data->dev_private == NULL) - rte_panic("Cannot allocate memzone for private " - "device data"); - } - - cryptodev->device = &pci_dev->device; - cryptodev->driver = cryptodrv; - - /* init user callbacks */ - TAILQ_INIT(&(cryptodev->link_intr_cbs)); - - /* Invoke PMD device initialization function */ - retval = (*cryptodrv->cryptodev_init)(cryptodrv, 
cryptodev); - if (retval == 0) - return 0; - - CDEV_LOG_ERR("driver %s: crypto_dev_init(vendor_id=0x%x device_id=0x%x)" - " failed", pci_drv->driver.name, - (unsigned) pci_dev->id.vendor_id, - (unsigned) pci_dev->id.device_id); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(cryptodev->data->dev_private); - - cryptodev->attached = RTE_CRYPTODEV_DETACHED; - cryptodev_globals.nb_devs--; - - return -ENXIO; -} - -int -rte_cryptodev_pci_remove(struct rte_pci_device *pci_dev) -{ - const struct rte_cryptodev_driver *cryptodrv; - struct rte_cryptodev *cryptodev; - char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - int ret; - - if (pci_dev == NULL) - return -EINVAL; - - rte_eal_pci_device_name(&pci_dev->addr, cryptodev_name, - sizeof(cryptodev_name)); - - cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name); - if (cryptodev == NULL) - return -ENODEV; - - cryptodrv = (const struct rte_cryptodev_driver *)pci_dev->driver; - if (cryptodrv == NULL) - return -ENODEV; - - /* Invoke PMD device uninit function */ - if (*cryptodrv->cryptodev_uninit) { - ret = (*cryptodrv->cryptodev_uninit)(cryptodrv, cryptodev); - if (ret) - return ret; - } - - /* free crypto device */ - rte_cryptodev_pmd_release_device(cryptodev); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(cryptodev->data->dev_private); - - cryptodev->device = NULL; - cryptodev->driver = NULL; - cryptodev->data = NULL; - - return 0; -} - -uint16_t -rte_cryptodev_queue_pair_count(uint8_t dev_id) -{ - struct rte_cryptodev *dev; - - dev = &rte_crypto_devices[dev_id]; - return dev->data->nb_queue_pairs; -} - -static int -rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs, - int socket_id) -{ - struct rte_cryptodev_info dev_info; - void **qp; - unsigned i; - - if ((dev == NULL) || (nb_qpairs < 1)) { - CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u", - dev, nb_qpairs); - return -EINVAL; - } - - CDEV_LOG_DEBUG("Setup %d queues pairs on device %u", - nb_qpairs, dev->data->dev_id); - - memset(&dev_info, 0, sizeof(struct rte_cryptodev_info)); - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); - (*dev->dev_ops->dev_infos_get)(dev, &dev_info); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); + (*dev->dev_ops->dev_infos_get)(dev, &dev_info); if (nb_qpairs > (dev_info.max_nb_queue_pairs)) { CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u", @@ -890,54 +939,6 @@ rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs, return 0; } -int -rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id) -{ - struct rte_cryptodev *dev; - - if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); - return -EINVAL; - } - - dev = &rte_crypto_devices[dev_id]; - if (queue_pair_id >= dev->data->nb_queue_pairs) { - CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP); - - return dev->dev_ops->queue_pair_start(dev, queue_pair_id); - -} - -int -rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id) -{ - struct rte_cryptodev *dev; - - if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); - return -EINVAL; - } - - dev = &rte_crypto_devices[dev_id]; - if (queue_pair_id >= dev->data->nb_queue_pairs) { - CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP); - - 
return dev->dev_ops->queue_pair_stop(dev, queue_pair_id); - -} - -static int -rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev, - unsigned nb_objs, unsigned obj_cache_size, int socket_id); - int rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) { @@ -957,6 +958,8 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) return -EBUSY; } + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); + /* Setup new number of queue pairs and reconfigure device. */ diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs, config->socket_id); @@ -966,11 +969,8 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) return diag; } - /* Setup Session mempool for device */ - return rte_cryptodev_sym_session_pool_create(dev, - config->session_mp.nb_objs, - config->session_mp.cache_size, - config->socket_id); + rte_cryptodev_trace_configure(dev_id, config); + return (*dev->dev_ops->dev_configure)(dev, config); } @@ -998,6 +998,7 @@ rte_cryptodev_start(uint8_t dev_id) } diag = (*dev->dev_ops->dev_start)(dev); + rte_cryptodev_trace_start(dev_id, diag); if (diag == 0) dev->data->dev_started = 1; else @@ -1026,8 +1027,9 @@ rte_cryptodev_stop(uint8_t dev_id) return; } - dev->data->dev_started = 0; (*dev->dev_ops->dev_stop)(dev); + rte_cryptodev_trace_stop(dev_id); + dev->data->dev_started = 0; } int @@ -1063,6 +1065,13 @@ rte_cryptodev_close(uint8_t dev_id) RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); retval = (*dev->dev_ops->dev_close)(dev); + rte_cryptodev_trace_close(dev_id, retval); + + if (capability_copy[dev_id]) { + free(capability_copy[dev_id]); + capability_copy[dev_id] = NULL; + } + is_capability_checked[dev_id] = 0; if (retval < 0) return retval; @@ -1073,6 +1082,7 @@ rte_cryptodev_close(uint8_t dev_id) int rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id) + { struct rte_cryptodev *dev; @@ -1087,6 +1097,42 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, return -EINVAL; } + if (!qp_conf) { + CDEV_LOG_ERR("qp_conf cannot be NULL\n"); + return -EINVAL; + } + + if ((qp_conf->mp_session && !qp_conf->mp_session_private) || + (!qp_conf->mp_session && qp_conf->mp_session_private)) { + CDEV_LOG_ERR("Invalid mempools\n"); + return -EINVAL; + } + + if (qp_conf->mp_session) { + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; + uint32_t obj_size = qp_conf->mp_session->elt_size; + uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size; + struct rte_cryptodev_sym_session s = {0}; + + pool_priv = rte_mempool_get_priv(qp_conf->mp_session); + if (!pool_priv || qp_conf->mp_session->private_data_size < + sizeof(*pool_priv)) { + CDEV_LOG_ERR("Invalid mempool\n"); + return -EINVAL; + } + + s.nb_drivers = pool_priv->nb_drivers; + s.user_data_sz = pool_priv->user_data_sz; + + if ((rte_cryptodev_sym_get_existing_header_session_size(&s) > + obj_size) || (s.nb_drivers <= dev->driver_id) || + rte_cryptodev_sym_get_private_session_size(dev_id) > + obj_priv_size) { + CDEV_LOG_ERR("Invalid mempool\n"); + return -EINVAL; + } + } + if (dev->data->dev_started) { CDEV_LOG_ERR( "device %d must be stopped to allow configuration", dev_id); @@ -1095,6 +1141,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP); + rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf); return 
(*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf, socket_id); } @@ -1139,13 +1186,65 @@ rte_cryptodev_stats_reset(uint8_t dev_id) (*dev->dev_ops->stats_reset)(dev); } +static void +get_v20_capabilities(uint8_t dev_id, struct rte_cryptodev_info *dev_info) +{ + const struct rte_cryptodev_capabilities *capability; + uint8_t found_invalid_capa = 0; + uint8_t counter = 0; + + for (capability = dev_info->capabilities; + capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; + ++capability, ++counter) { + if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && + capability->sym.xform_type == + RTE_CRYPTO_SYM_XFORM_AEAD + && capability->sym.aead.algo >= + RTE_CRYPTO_AEAD_CHACHA20_POLY1305) { + found_invalid_capa = 1; + counter--; + } + } + is_capability_checked[dev_id] = 1; + if (!found_invalid_capa) + return; + capability_copy[dev_id] = malloc(counter * + sizeof(struct rte_cryptodev_capabilities)); + if (capability_copy[dev_id] == NULL) { + /* + * error case - no memory to store the trimmed + * list, so have to return an empty list + */ + dev_info->capabilities = + cryptodev_undefined_capabilities; + is_capability_checked[dev_id] = 0; + } else { + counter = 0; + for (capability = dev_info->capabilities; + capability->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED; + capability++) { + if (!(capability->op == + RTE_CRYPTO_OP_TYPE_SYMMETRIC + && capability->sym.xform_type == + RTE_CRYPTO_SYM_XFORM_AEAD + && capability->sym.aead.algo >= + RTE_CRYPTO_AEAD_CHACHA20_POLY1305)) { + capability_copy[dev_id][counter++] = + *capability; + } + } + dev_info->capabilities = + capability_copy[dev_id]; + } +} -void -rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) +void __vsym +rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info) { struct rte_cryptodev *dev; - if (dev_id >= cryptodev_globals.nb_devs) { + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); return; } @@ -1157,11 +1256,40 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); - dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); - if (dev->driver) - dev_info->driver_name = dev->driver->pci_drv.driver.name; + if (capability_copy[dev_id] == NULL) { + if (!is_capability_checked[dev_id]) + get_v20_capabilities(dev_id, dev_info); + } else + dev_info->capabilities = capability_copy[dev_id]; + + dev_info->driver_name = dev->device->driver->name; + dev_info->device = dev->device; } +VERSION_SYMBOL(rte_cryptodev_info_get, _v20, 20.0); +void __vsym +rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); + return; + } + + dev = &rte_crypto_devices[dev_id]; + + memset(dev_info, 0, sizeof(struct rte_cryptodev_info)); + + RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); + (*dev->dev_ops->dev_infos_get)(dev, dev_info); + + dev_info->driver_name = dev->device->driver->name; + dev_info->device = dev->device; +} +MAP_STATIC_SYMBOL(void rte_cryptodev_info_get(uint8_t dev_id, + struct rte_cryptodev_info *dev_info), rte_cryptodev_info_get_v21); +BIND_DEFAULT_SYMBOL(rte_cryptodev_info_get, _v21, 21); int rte_cryptodev_callback_register(uint8_t dev_id, @@ -1275,149 +1403,439 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev, } -static void -rte_cryptodev_sym_session_init(struct rte_mempool *mp, - void 
*opaque_arg, - void *_sess, - __rte_unused unsigned i) +int +rte_cryptodev_sym_session_init(uint8_t dev_id, + struct rte_cryptodev_sym_session *sess, + struct rte_crypto_sym_xform *xforms, + struct rte_mempool *mp) { - struct rte_cryptodev_sym_session *sess = _sess; - struct rte_cryptodev *dev = opaque_arg; + struct rte_cryptodev *dev; + uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size( + dev_id); + uint8_t index; + int ret; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } - memset(sess, 0, mp->elt_size); + dev = rte_cryptodev_pmd_get_dev(dev_id); - sess->dev_id = dev->data->dev_id; - sess->dev_type = dev->dev_type; - sess->mp = mp; + if (sess == NULL || xforms == NULL || dev == NULL) + return -EINVAL; - if (dev->dev_ops->session_initialize) - (*dev->dev_ops->session_initialize)(mp, sess); + if (mp->elt_size < sess_priv_sz) + return -EINVAL; + + index = dev->driver_id; + if (index >= sess->nb_drivers) + return -EINVAL; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP); + + if (sess->sess_data[index].refcnt == 0) { + ret = dev->dev_ops->sym_session_configure(dev, xforms, + sess, mp); + if (ret < 0) { + CDEV_LOG_ERR( + "dev_id %d failed to configure session details", + dev_id); + return ret; + } + } + + rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp); + sess->sess_data[index].refcnt++; + return 0; } -static int -rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev, - unsigned nb_objs, unsigned obj_cache_size, int socket_id) +int +rte_cryptodev_asym_session_init(uint8_t dev_id, + struct rte_cryptodev_asym_session *sess, + struct rte_crypto_asym_xform *xforms, + struct rte_mempool *mp) { - char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - unsigned priv_sess_size; + struct rte_cryptodev *dev; + uint8_t index; + int ret; - unsigned n = snprintf(mp_name, sizeof(mp_name), "cdev_%d_sess_mp", - dev->data->dev_id); - if (n > sizeof(mp_name)) { - CDEV_LOG_ERR("Unable to create unique name for session mempool"); - return -ENOMEM; + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_get_size, -ENOTSUP); - priv_sess_size = (*dev->dev_ops->session_get_size)(dev); - if (priv_sess_size == 0) { - CDEV_LOG_ERR("%s returned and invalid private session size ", - dev->data->name); - return -ENOMEM; - } + dev = rte_cryptodev_pmd_get_dev(dev_id); - unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) + - priv_sess_size; + if (sess == NULL || xforms == NULL || dev == NULL) + return -EINVAL; - dev->data->session_pool = rte_mempool_lookup(mp_name); - if (dev->data->session_pool != NULL) { - if ((dev->data->session_pool->elt_size != elt_size) || - (dev->data->session_pool->cache_size < - obj_cache_size) || - (dev->data->session_pool->size < nb_objs)) { - - CDEV_LOG_ERR("%s mempool already exists with different" - " initialization parameters", mp_name); - dev->data->session_pool = NULL; - return -ENOMEM; - } - } else { - dev->data->session_pool = rte_mempool_create( - mp_name, /* mempool name */ - nb_objs, /* number of elements*/ - elt_size, /* element size*/ - obj_cache_size, /* Cache size*/ - 0, /* private data size */ - NULL, /* obj initialization constructor */ - NULL, /* obj initialization constructor arg */ - rte_cryptodev_sym_session_init, - /**< obj constructor*/ - dev, /* obj constructor arg */ - socket_id, /* socket id */ - 0); /* flags */ - - if 
(dev->data->session_pool == NULL) { - CDEV_LOG_ERR("%s mempool allocation failed", mp_name); - return -ENOMEM; + index = dev->driver_id; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, + -ENOTSUP); + + if (sess->sess_private_data[index] == NULL) { + ret = dev->dev_ops->asym_session_configure(dev, + xforms, + sess, mp); + if (ret < 0) { + CDEV_LOG_ERR( + "dev_id %d failed to configure session details", + dev_id); + return ret; } } - CDEV_LOG_DEBUG("%s mempool created!", mp_name); + rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp); return 0; } +struct rte_mempool * +rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, + uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size, + int socket_id) +{ + struct rte_mempool *mp; + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; + uint32_t obj_sz; + + obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size; + if (obj_sz > elt_size) + CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size, + obj_sz); + else + obj_sz = elt_size; + + mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size, + (uint32_t)(sizeof(*pool_priv)), + NULL, NULL, NULL, NULL, + socket_id, 0); + if (mp == NULL) { + CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", + __func__, name, rte_errno); + return NULL; + } + + pool_priv = rte_mempool_get_priv(mp); + if (!pool_priv) { + CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", + __func__, name); + rte_mempool_free(mp); + return NULL; + } + + pool_priv->nb_drivers = nb_drivers; + pool_priv->user_data_sz = user_data_size; + + rte_cryptodev_trace_sym_session_pool_create(name, nb_elts, + elt_size, cache_size, user_data_size, mp); + return mp; +} + +static unsigned int +rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess) +{ + return (sizeof(sess->sess_data[0]) * sess->nb_drivers) + + sess->user_data_sz; +} + struct rte_cryptodev_sym_session * -rte_cryptodev_sym_session_create(uint8_t dev_id, - struct rte_crypto_sym_xform *xform) +rte_cryptodev_sym_session_create(struct rte_mempool *mp) { - struct rte_cryptodev *dev; struct rte_cryptodev_sym_session *sess; - void *_sess; + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; - if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); + if (!mp) { + CDEV_LOG_ERR("Invalid mempool\n"); return NULL; } - dev = &rte_crypto_devices[dev_id]; + pool_priv = rte_mempool_get_priv(mp); + + if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) { + CDEV_LOG_ERR("Invalid mempool\n"); + return NULL; + } /* Allocate a session structure from the session pool */ - if (rte_mempool_get(dev->data->session_pool, &_sess)) { - CDEV_LOG_ERR("Couldn't get object from session mempool"); + if (rte_mempool_get(mp, (void **)&sess)) { + CDEV_LOG_ERR("couldn't get object from session mempool"); return NULL; } - sess = (struct rte_cryptodev_sym_session *)_sess; + sess->nb_drivers = pool_priv->nb_drivers; + sess->user_data_sz = pool_priv->user_data_sz; + sess->opaque_data = 0; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL); - if (dev->dev_ops->session_configure(dev, xform, sess->_private) == - NULL) { - CDEV_LOG_ERR("dev_id %d failed to configure session details", - dev_id); + /* Clear device session pointer. 
+ * Include the flag indicating presence of user data + */ + memset(sess->sess_data, 0, + rte_cryptodev_sym_session_data_size(sess)); + + rte_cryptodev_trace_sym_session_create(mp, sess); + return sess; +} + +struct rte_cryptodev_asym_session * +rte_cryptodev_asym_session_create(struct rte_mempool *mp) +{ + struct rte_cryptodev_asym_session *sess; - /* Return session to mempool */ - rte_mempool_put(sess->mp, _sess); + /* Allocate a session structure from the session pool */ + if (rte_mempool_get(mp, (void **)&sess)) { + CDEV_LOG_ERR("couldn't get object from session mempool"); return NULL; } + /* Clear device session pointer. + * Include the flag indicating presence of private data + */ + memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t)); + + rte_cryptodev_trace_asym_session_create(mp, sess); return sess; } -struct rte_cryptodev_sym_session * -rte_cryptodev_sym_session_free(uint8_t dev_id, +int +rte_cryptodev_sym_session_clear(uint8_t dev_id, struct rte_cryptodev_sym_session *sess) { struct rte_cryptodev *dev; + uint8_t driver_id; if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); - return sess; + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; } - dev = &rte_crypto_devices[dev_id]; + dev = rte_cryptodev_pmd_get_dev(dev_id); - /* Check the session belongs to this device type */ - if (sess->dev_type != dev->dev_type) - return sess; + if (dev == NULL || sess == NULL) + return -EINVAL; + + driver_id = dev->driver_id; + if (sess->sess_data[driver_id].refcnt == 0) + return 0; + if (--sess->sess_data[driver_id].refcnt != 0) + return -EBUSY; - /* Let device implementation clear session material */ - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_clear, sess); - dev->dev_ops->session_clear(dev, (void *)sess->_private); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP); + + dev->dev_ops->sym_session_clear(dev, sess); + + rte_cryptodev_trace_sym_session_clear(dev_id, sess); + return 0; +} + +int +rte_cryptodev_asym_session_clear(uint8_t dev_id, + struct rte_cryptodev_asym_session *sess) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + if (dev == NULL || sess == NULL) + return -EINVAL; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP); + + dev->dev_ops->asym_session_clear(dev, sess); + + rte_cryptodev_trace_sym_session_clear(dev_id, sess); + return 0; +} + +int +rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess) +{ + uint8_t i; + struct rte_mempool *sess_mp; + + if (sess == NULL) + return -EINVAL; + + /* Check that all device private data has been freed */ + for (i = 0; i < sess->nb_drivers; i++) { + if (sess->sess_data[i].refcnt != 0) + return -EBUSY; + } /* Return session to mempool */ - rte_mempool_put(sess->mp, (void *)sess); + sess_mp = rte_mempool_from_obj(sess); + rte_mempool_put(sess_mp, sess); - return NULL; + rte_cryptodev_trace_sym_session_free(sess); + return 0; +} + +int +rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess) +{ + uint8_t i; + void *sess_priv; + struct rte_mempool *sess_mp; + + if (sess == NULL) + return -EINVAL; + + /* Check that all device private data has been freed */ + for (i = 0; i < nb_drivers; i++) { + sess_priv = get_asym_session_private_data(sess, i); + if (sess_priv != NULL) + return -EBUSY; + } + + /* Return session to mempool */ + sess_mp = 
rte_mempool_from_obj(sess); + rte_mempool_put(sess_mp, sess); + + rte_cryptodev_trace_asym_session_free(sess); + return 0; +} + +unsigned int +rte_cryptodev_sym_get_header_session_size(void) +{ + /* + * Header contains pointers to the private data of all registered + * drivers and all necessary information to ensure safely clear + * or free al session. + */ + struct rte_cryptodev_sym_session s = {0}; + + s.nb_drivers = nb_drivers; + + return (unsigned int)(sizeof(s) + + rte_cryptodev_sym_session_data_size(&s)); +} + +unsigned int +rte_cryptodev_sym_get_existing_header_session_size( + struct rte_cryptodev_sym_session *sess) +{ + if (!sess) + return 0; + else + return (unsigned int)(sizeof(*sess) + + rte_cryptodev_sym_session_data_size(sess)); +} + +unsigned int +rte_cryptodev_asym_get_header_session_size(void) +{ + /* + * Header contains pointers to the private data + * of all registered drivers, and a flag which + * indicates presence of private data + */ + return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t)); +} + +unsigned int +rte_cryptodev_sym_get_private_session_size(uint8_t dev_id) +{ + struct rte_cryptodev *dev; + unsigned int priv_sess_size; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) + return 0; + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + if (*dev->dev_ops->sym_session_get_size == NULL) + return 0; + + priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev); + + return priv_sess_size; +} + +unsigned int +rte_cryptodev_asym_get_private_session_size(uint8_t dev_id) +{ + struct rte_cryptodev *dev; + unsigned int header_size = sizeof(void *) * nb_drivers; + unsigned int priv_sess_size; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) + return 0; + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + if (*dev->dev_ops->asym_session_get_size == NULL) + return 0; + + priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev); + if (priv_sess_size < header_size) + return header_size; + + return priv_sess_size; + +} + +int +rte_cryptodev_sym_session_set_user_data( + struct rte_cryptodev_sym_session *sess, + void *data, + uint16_t size) +{ + if (sess == NULL) + return -EINVAL; + + if (sess->user_data_sz < size) + return -ENOMEM; + + rte_memcpy(sess->sess_data + sess->nb_drivers, data, size); + return 0; +} + +void * +rte_cryptodev_sym_session_get_user_data( + struct rte_cryptodev_sym_session *sess) +{ + if (sess == NULL || sess->user_data_sz == 0) + return NULL; + + return (void *)(sess->sess_data + sess->nb_drivers); +} + +static inline void +sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum) +{ + uint32_t i; + for (i = 0; i < vec->num; i++) + vec->status[i] = errnum; +} + +uint32_t +rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, + struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, + struct rte_crypto_sym_vec *vec) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + sym_crypto_fill_status(vec, EINVAL); + return 0; + } + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + if (*dev->dev_ops->sym_cpu_process == NULL || + !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) { + sym_crypto_fill_status(vec, ENOTSUP); + return 0; + } + + return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec); } /** Initialise rte_crypto_op mempool element */ @@ -1434,7 +1852,7 @@ rte_crypto_op_init(struct rte_mempool *mempool, __rte_crypto_op_reset(op, type); - op->phys_addr = rte_mem_virt2phy(_op_data); + op->phys_addr = rte_mem_virt2iova(_op_data); op->mempool = mempool; } @@ -1447,9 +1865,20 @@ 
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type, struct rte_crypto_op_pool_private *priv; unsigned elt_size = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op) + priv_size; + if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { + elt_size += sizeof(struct rte_crypto_sym_op); + } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { + elt_size += sizeof(struct rte_crypto_asym_op); + } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) { + elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op), + sizeof(struct rte_crypto_asym_op)); + } else { + CDEV_LOG_ERR("Invalid op_type\n"); + return NULL; + } + /* lookup mempool in case already allocated */ struct rte_mempool *mp = rte_mempool_lookup(name); @@ -1519,3 +1948,67 @@ rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix) return -1; } + +TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver); + +static struct cryptodev_driver_list cryptodev_driver_list = + TAILQ_HEAD_INITIALIZER(cryptodev_driver_list); + +int +rte_cryptodev_driver_id_get(const char *name) +{ + struct cryptodev_driver *driver; + const char *driver_name; + + if (name == NULL) { + RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL"); + return -1; + } + + TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { + driver_name = driver->driver->name; + if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) + return driver->id; + } + return -1; +} + +const char * +rte_cryptodev_name_get(uint8_t dev_id) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_is_valid_device_data(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return NULL; + } + + dev = rte_cryptodev_pmd_get_dev(dev_id); + if (dev == NULL) + return NULL; + + return dev->data->name; +} + +const char * +rte_cryptodev_driver_name_get(uint8_t driver_id) +{ + struct cryptodev_driver *driver; + + TAILQ_FOREACH(driver, &cryptodev_driver_list, next) + if (driver->id == driver_id) + return driver->driver->name; + return NULL; +} + +uint8_t +rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv, + const struct rte_driver *drv) +{ + crypto_drv->driver = drv; + crypto_drv->id = nb_drivers; + + TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next); + + return nb_drivers++; +}
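
The diff replaces the old dev_type based lookups (rte_cryptodev_count_devtype()) with driver-id based enumeration: rte_cryptodev_driver_id_get(), rte_cryptodev_device_count_by_driver(), rte_cryptodev_devices_get() and rte_cryptodev_name_get(). A minimal usage sketch of that new surface follows; it is illustrative only and not part of the patch, and the function name list_devs_of_driver is made up for the example.

/* Sketch: enumerate the crypto devices attached to one driver using
 * the driver-id based API introduced above. Error handling is kept
 * minimal on purpose.
 */
#include <stdio.h>
#include <rte_cryptodev.h>

static void
list_devs_of_driver(const char *driver_name)
{
	uint8_t devs[RTE_CRYPTO_MAX_DEVS];
	uint8_t nb, i;
	int driver_id = rte_cryptodev_driver_id_get(driver_name);

	if (driver_id < 0) {
		printf("driver %s is not registered\n", driver_name);
		return;
	}

	printf("driver %s: %u device(s) attached\n", driver_name,
		(unsigned int)rte_cryptodev_device_count_by_driver(
			(uint8_t)driver_id));

	/* rte_cryptodev_devices_get() matches on the driver name and
	 * fills in the ids of all attached devices.
	 */
	nb = rte_cryptodev_devices_get(driver_name, devs, RTE_CRYPTO_MAX_DEVS);
	for (i = 0; i < nb; i++)
		printf("  dev %u: %s (socket %d)\n", (unsigned int)devs[i],
			rte_cryptodev_name_get(devs[i]),
			rte_cryptodev_socket_id(devs[i]));
}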
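
The capability helpers touched here (rte_cryptodev_sym_capability_get(), now versioned _v20/_v21 to hide ChaCha20-Poly1305 from old binaries, and the param_range_check()-backed rte_cryptodev_sym_capability_check_* functions) let an application validate transform parameters before building a session. A short sketch, assuming AES-CBC with a 16-byte key and IV purely as example values:

/* Sketch: check whether a device advertises AES-CBC with 16-byte key
 * and IV. Returns 1 if supported, 0 otherwise.
 */
#include <rte_cryptodev.h>

static int
dev_supports_aes_cbc_128(uint8_t dev_id)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return 0;

	/* The check helper returns 0 when both sizes fall inside the
	 * advertised rte_crypto_param_range (min/max/increment).
	 */
	return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0;
}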
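
The largest functional change is the removal of the per-device session mempool (the old internal rte_cryptodev_sym_session_pool_create() helper driven by config->session_mp) in favour of application-owned pools and a create/init split. The sketch below shows the resulting flow under stated assumptions: the pool names "sess_hdr_mp"/"sess_priv_mp", the element counts and cache sizes are arbitrary example values, and per-pool error unwinding is trimmed for brevity.

/* Sketch of the two-step session flow implemented in this file: the
 * application owns a header pool and a driver-private-data pool, then
 * creates and initialises the session separately.
 */
#include <rte_cryptodev.h>
#include <rte_mempool.h>

static struct rte_cryptodev_sym_session *
setup_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		int socket_id)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_mempool *sess_mp, *priv_mp;
	uint32_t priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);

	/* Header pool: element size covers the multi-driver session
	 * header plus optional user data (0 here).
	 */
	sess_mp = rte_cryptodev_sym_session_pool_create("sess_hdr_mp", 1024,
			rte_cryptodev_sym_get_header_session_size(),
			32, 0, socket_id);

	/* Private-data pool: plain mempool whose element size must be at
	 * least the driver's private session size.
	 */
	priv_mp = rte_mempool_create("sess_priv_mp", 1024, priv_sz, 32, 0,
			NULL, NULL, NULL, NULL, socket_id, 0);
	if (sess_mp == NULL || priv_mp == NULL)
		return NULL;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* Fills sess->sess_data[driver_id] from priv_mp and bumps its
	 * reference count.
	 */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}

Teardown reverses the order: rte_cryptodev_sym_session_clear(dev_id, sess) drops the per-driver reference, then rte_cryptodev_sym_session_free(sess) returns the header to its pool. Note that rte_cryptodev_queue_pair_setup() above now cross-checks qp_conf->mp_session and mp_session_private against the same size helpers before accepting the pools.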
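
For the asymmetric path, the diff adds rte_cryptodev_asym_capability_get() plus the op-type and modulus-length checkers. A hedged sketch checking modular-exponentiation support; the helper name and the idea of passing the modulus length in bytes are assumptions of the example, not requirements of the patch.

/* Sketch: verify a device can do modexp with a given modulus length. */
#include <rte_cryptodev.h>

static int
dev_supports_modexp(uint8_t dev_id, uint16_t modlen)
{
	const struct rte_cryptodev_asymmetric_xform_capability *cap;
	struct rte_cryptodev_asym_capability_idx idx = {
		.type = RTE_CRYPTO_ASYM_XFORM_MODEX,
	};

	cap = rte_cryptodev_asym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return 0;

	/* 0 means modlen is within the advertised range and a multiple
	 * of the increment (a zero min/max/increment disables that check).
	 */
	return rte_cryptodev_asym_xform_capability_check_modlen(cap,
			modlen) == 0;
}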