X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_cryptodev%2Frte_cryptodev.c;h=1dd795bcb517d165974c9a119a969095906ac2c1;hb=a617494eeb01ff;hp=2a8e07d273c6377ee3b481653b8bcb602915a2d7;hpb=e764cd72a9bc697b84b055ad3cbb723860e256e4;p=dpdk.git diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c index 2a8e07d273..1dd795bcb5 100644 --- a/lib/librte_cryptodev/rte_cryptodev.c +++ b/lib/librte_cryptodev/rte_cryptodev.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2017 Intel Corporation + * Copyright(c) 2015-2020 Intel Corporation */ #include @@ -36,10 +36,13 @@ #include #include #include +#include +#include #include "rte_crypto.h" #include "rte_cryptodev.h" #include "rte_cryptodev_pmd.h" +#include "rte_cryptodev_trace.h" static uint8_t nb_drivers; @@ -50,13 +53,20 @@ struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices; static struct rte_cryptodev_global cryptodev_globals = { .devs = rte_crypto_devices, .data = { NULL }, - .nb_devs = 0, - .max_devs = RTE_CRYPTO_MAX_DEVS + .nb_devs = 0 }; /* spinlock for crypto device callbacks */ static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER; +static const struct rte_cryptodev_capabilities + cryptodev_undefined_capabilities[] = { + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static struct rte_cryptodev_capabilities + *capability_copy[RTE_CRYPTO_MAX_DEVS]; +static uint8_t is_capability_checked[RTE_CRYPTO_MAX_DEVS]; /** * The user application callback description. @@ -152,6 +162,7 @@ const char * rte_crypto_aead_algorithm_strings[] = { [RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm", [RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm", + [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305" }; /** @@ -174,6 +185,8 @@ const char *rte_crypto_asym_xform_strings[] = { [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv", [RTE_CRYPTO_ASYM_XFORM_DH] = "dh", [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa", + [RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa", + [RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm", }; /** @@ -250,7 +263,7 @@ rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, return -1; } -int __rte_experimental +int rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, const char *xform_string) { @@ -278,15 +291,15 @@ rte_crypto_auth_operation_strings[] = { [RTE_CRYPTO_AUTH_OP_GENERATE] = "generate" }; -const struct rte_cryptodev_symmetric_capability * -rte_cryptodev_sym_capability_get(uint8_t dev_id, +const struct rte_cryptodev_symmetric_capability __vsym * +rte_cryptodev_sym_capability_get_v20(uint8_t dev_id, const struct rte_cryptodev_sym_capability_idx *idx) { const struct rte_cryptodev_capabilities *capability; struct rte_cryptodev_info dev_info; int i = 0; - rte_cryptodev_info_get(dev_id, &dev_info); + rte_cryptodev_info_get_v20(dev_id, &dev_info); while ((capability = &dev_info.capabilities[i++])->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) { @@ -310,8 +323,47 @@ rte_cryptodev_sym_capability_get(uint8_t dev_id, } return NULL; +} +VERSION_SYMBOL(rte_cryptodev_sym_capability_get, _v20, 20.0); + +const struct rte_cryptodev_symmetric_capability __vsym * +rte_cryptodev_sym_capability_get_v21(uint8_t dev_id, + const struct rte_cryptodev_sym_capability_idx *idx) +{ + const struct rte_cryptodev_capabilities *capability; + struct rte_cryptodev_info dev_info; + int i = 0; + + rte_cryptodev_info_get(dev_id, &dev_info); + + while ((capability = &dev_info.capabilities[i++])->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) 
+ continue; + + if (capability->sym.xform_type != idx->type) + continue; + + if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH && + capability->sym.auth.algo == idx->algo.auth) + return &capability->sym; + + if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + capability->sym.cipher.algo == idx->algo.cipher) + return &capability->sym; + if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD && + capability->sym.aead.algo == idx->algo.aead) + return &capability->sym; + } + + return NULL; } +MAP_STATIC_SYMBOL(const struct rte_cryptodev_symmetric_capability * + rte_cryptodev_sym_capability_get(uint8_t dev_id, + const struct rte_cryptodev_sym_capability_idx *idx), + rte_cryptodev_sym_capability_get_v21); +BIND_DEFAULT_SYMBOL(rte_cryptodev_sym_capability_get, _v21, 21); static int param_range_check(uint16_t size, const struct rte_crypto_param_range *range) @@ -338,7 +390,7 @@ param_range_check(uint16_t size, const struct rte_crypto_param_range *range) return -1; } -const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental +const struct rte_cryptodev_asymmetric_xform_capability * rte_cryptodev_asym_capability_get(uint8_t dev_id, const struct rte_cryptodev_asym_capability_idx *idx) { @@ -411,7 +463,7 @@ rte_cryptodev_sym_capability_check_aead( return 0; } -int __rte_experimental +int rte_cryptodev_asym_xform_capability_check_optype( const struct rte_cryptodev_asymmetric_xform_capability *capability, enum rte_crypto_asym_op_type op_type) @@ -422,7 +474,7 @@ rte_cryptodev_asym_xform_capability_check_optype( return 0; } -int __rte_experimental +int rte_cryptodev_asym_xform_capability_check_modlen( const struct rte_cryptodev_asymmetric_xform_capability *capability, uint16_t modlen) @@ -486,6 +538,20 @@ rte_cryptodev_get_feature_name(uint64_t flag) return "CPU_ARM_CE"; case RTE_CRYPTODEV_FF_SECURITY: return "SECURITY_PROTOCOL"; + case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP: + return "RSA_PRIV_OP_KEY_EXP"; + case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT: + return "RSA_PRIV_OP_KEY_QT"; + case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED: + return "DIGEST_ENCRYPTED"; + case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO: + return "SYM_CPU_CRYPTO"; + case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS: + return "ASYM_SESSIONLESS"; + case RTE_CRYPTODEV_FF_SYM_SESSIONLESS: + return "SYM_SESSIONLESS"; + case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA: + return "NON_BYTE_ALIGNED_DATA"; default: return NULL; } @@ -506,7 +572,7 @@ rte_cryptodev_pmd_get_named_dev(const char *name) if (name == NULL) return NULL; - for (i = 0; i < cryptodev_globals.max_devs; i++) { + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { dev = &cryptodev_globals.devs[i]; if ((dev->attached == RTE_CRYPTODEV_ATTACHED) && @@ -517,12 +583,22 @@ rte_cryptodev_pmd_get_named_dev(const char *name) return NULL; } +static inline uint8_t +rte_cryptodev_is_valid_device_data(uint8_t dev_id) +{ + if (dev_id >= RTE_CRYPTO_MAX_DEVS || + rte_crypto_devices[dev_id].data == NULL) + return 0; + + return 1; +} + unsigned int rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id) { struct rte_cryptodev *dev = NULL; - if (dev_id >= cryptodev_globals.nb_devs) + if (!rte_cryptodev_is_valid_device_data(dev_id)) return 0; dev = rte_cryptodev_pmd_get_dev(dev_id); @@ -541,12 +617,15 @@ rte_cryptodev_get_dev_id(const char *name) if (name == NULL) return -1; - for (i = 0; i < cryptodev_globals.nb_devs; i++) + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { + if (!rte_cryptodev_is_valid_device_data(i)) + continue; if ((strcmp(cryptodev_globals.devs[i].data->name, name) == 0) && (cryptodev_globals.devs[i].attached == 
RTE_CRYPTODEV_ATTACHED)) return i; + } return -1; } @@ -562,7 +641,7 @@ rte_cryptodev_device_count_by_driver(uint8_t driver_id) { uint8_t i, dev_count = 0; - for (i = 0; i < cryptodev_globals.max_devs; i++) + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) if (cryptodev_globals.devs[i].driver_id == driver_id && cryptodev_globals.devs[i].attached == RTE_CRYPTODEV_ATTACHED) @@ -577,16 +656,17 @@ rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, { uint8_t i, count = 0; struct rte_cryptodev *devs = cryptodev_globals.devs; - uint8_t max_devs = cryptodev_globals.max_devs; - for (i = 0; i < max_devs && count < nb_devices; i++) { + for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) { + if (!rte_cryptodev_is_valid_device_data(i)) + continue; if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) { int cmp; cmp = strncmp(devs[i].device->driver->name, driver_name, - strlen(driver_name)); + strlen(driver_name) + 1); if (cmp == 0) devices[count++] = devs[i].data->dev_id; @@ -599,8 +679,9 @@ rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, void * rte_cryptodev_get_sec_ctx(uint8_t dev_id) { - if (rte_crypto_devices[dev_id].feature_flags & - RTE_CRYPTODEV_FF_SECURITY) + if (dev_id < RTE_CRYPTO_MAX_DEVS && + (rte_crypto_devices[dev_id].feature_flags & + RTE_CRYPTODEV_FF_SECURITY)) return rte_crypto_devices[dev_id].security_ctx; return NULL; @@ -623,7 +704,7 @@ static inline int rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, int socket_id) { - char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + char mz_name[RTE_MEMZONE_NAMESIZE]; const struct rte_memzone *mz; int n; @@ -636,8 +717,13 @@ rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, mz = rte_memzone_reserve(mz_name, sizeof(struct rte_cryptodev_data), socket_id, 0); - } else + CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)", + mz_name, mz); + } else { mz = rte_memzone_lookup(mz_name); + CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)", + mz_name, mz); + } if (mz == NULL) return -ENOMEM; @@ -649,6 +735,37 @@ rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, return 0; } +static inline int +rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + int n; + + /* generate memzone name */ + n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id); + if (n >= (int)sizeof(mz_name)) + return -EINVAL; + + mz = rte_memzone_lookup(mz_name); + if (mz == NULL) + return -ENOMEM; + + RTE_ASSERT(*data == mz->addr); + *data = NULL; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)", + mz_name, mz); + return rte_memzone_free(mz); + } else { + CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)", + mz_name, mz); + } + + return 0; +} + static uint8_t rte_cryptodev_find_free_device_index(void) { @@ -683,23 +800,32 @@ rte_cryptodev_pmd_allocate(const char *name, int socket_id) cryptodev = rte_cryptodev_pmd_get_dev(dev_id); if (cryptodev->data == NULL) { - struct rte_cryptodev_data *cryptodev_data = - cryptodev_globals.data[dev_id]; + struct rte_cryptodev_data **cryptodev_data = + &cryptodev_globals.data[dev_id]; - int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data, + int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data, socket_id); - if (retval < 0 || cryptodev_data == NULL) + if (retval < 0 || *cryptodev_data == NULL) return NULL; - cryptodev->data = cryptodev_data; + 
cryptodev->data = *cryptodev_data; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + strlcpy(cryptodev->data->name, name, + RTE_CRYPTODEV_NAME_MAX_LEN); - snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN, - "%s", name); + cryptodev->data->dev_id = dev_id; + cryptodev->data->socket_id = socket_id; + cryptodev->data->dev_started = 0; + CDEV_LOG_DEBUG("PRIMARY:init data"); + } - cryptodev->data->dev_id = dev_id; - cryptodev->data->socket_id = socket_id; - cryptodev->data->dev_started = 0; + CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d", + cryptodev->data->name, + cryptodev->data->dev_id, + cryptodev->data->socket_id, + cryptodev->data->dev_started); /* init user callbacks */ TAILQ_INIT(&(cryptodev->link_intr_cbs)); @@ -716,17 +842,24 @@ int rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev) { int ret; + uint8_t dev_id; if (cryptodev == NULL) return -EINVAL; + dev_id = cryptodev->data->dev_id; + /* Close device only if device operations have been set */ if (cryptodev->dev_ops) { - ret = rte_cryptodev_close(cryptodev->data->dev_id); + ret = rte_cryptodev_close(dev_id); if (ret < 0) return ret; } + ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]); + if (ret < 0) + return ret; + cryptodev->attached = RTE_CRYPTODEV_DETACHED; cryptodev_globals.nb_devs--; return 0; @@ -737,6 +870,11 @@ rte_cryptodev_queue_pair_count(uint8_t dev_id) { struct rte_cryptodev *dev; + if (!rte_cryptodev_is_valid_device_data(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return 0; + } + dev = &rte_crypto_devices[dev_id]; return dev->data->nb_queue_pairs; } @@ -849,6 +987,7 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) return diag; } + rte_cryptodev_trace_configure(dev_id, config); return (*dev->dev_ops->dev_configure)(dev, config); } @@ -877,6 +1016,7 @@ rte_cryptodev_start(uint8_t dev_id) } diag = (*dev->dev_ops->dev_start)(dev); + rte_cryptodev_trace_start(dev_id, diag); if (diag == 0) dev->data->dev_started = 1; else @@ -906,6 +1046,7 @@ rte_cryptodev_stop(uint8_t dev_id) } (*dev->dev_ops->dev_stop)(dev); + rte_cryptodev_trace_stop(dev_id); dev->data->dev_started = 0; } @@ -942,6 +1083,13 @@ rte_cryptodev_close(uint8_t dev_id) RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); retval = (*dev->dev_ops->dev_close)(dev); + rte_cryptodev_trace_close(dev_id, retval); + + if (capability_copy[dev_id]) { + free(capability_copy[dev_id]); + capability_copy[dev_id] = NULL; + } + is_capability_checked[dev_id] = 0; if (retval < 0) return retval; @@ -949,6 +1097,35 @@ rte_cryptodev_close(uint8_t dev_id) return 0; } +int +rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } + + dev = &rte_crypto_devices[dev_id]; + if (queue_pair_id >= dev->data->nb_queue_pairs) { + CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); + return -EINVAL; + } + void **qps = dev->data->queue_pairs; + + if (qps[queue_pair_id]) { + CDEV_LOG_DEBUG("qp %d on dev %d is initialised", + queue_pair_id, dev_id); + return 1; + } + + CDEV_LOG_DEBUG("qp %d on dev %d is not initialised", + queue_pair_id, dev_id); + + return 0; +} + int rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id) @@ -992,6 +1169,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, } s.nb_drivers = 
pool_priv->nb_drivers; + s.user_data_sz = pool_priv->user_data_sz; if ((rte_cryptodev_sym_get_existing_header_session_size(&s) > obj_size) || (s.nb_drivers <= dev->driver_id) || @@ -1010,6 +1188,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP); + rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf); return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf, socket_id); } @@ -1054,13 +1233,65 @@ rte_cryptodev_stats_reset(uint8_t dev_id) (*dev->dev_ops->stats_reset)(dev); } +static void +get_v20_capabilities(uint8_t dev_id, struct rte_cryptodev_info *dev_info) +{ + const struct rte_cryptodev_capabilities *capability; + uint8_t found_invalid_capa = 0; + uint8_t counter = 0; + + for (capability = dev_info->capabilities; + capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; + ++capability, ++counter) { + if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && + capability->sym.xform_type == + RTE_CRYPTO_SYM_XFORM_AEAD + && capability->sym.aead.algo >= + RTE_CRYPTO_AEAD_CHACHA20_POLY1305) { + found_invalid_capa = 1; + counter--; + } + } + is_capability_checked[dev_id] = 1; + if (!found_invalid_capa) + return; + capability_copy[dev_id] = malloc(counter * + sizeof(struct rte_cryptodev_capabilities)); + if (capability_copy[dev_id] == NULL) { + /* + * error case - no memory to store the trimmed + * list, so have to return an empty list + */ + dev_info->capabilities = + cryptodev_undefined_capabilities; + is_capability_checked[dev_id] = 0; + } else { + counter = 0; + for (capability = dev_info->capabilities; + capability->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED; + capability++) { + if (!(capability->op == + RTE_CRYPTO_OP_TYPE_SYMMETRIC + && capability->sym.xform_type == + RTE_CRYPTO_SYM_XFORM_AEAD + && capability->sym.aead.algo >= + RTE_CRYPTO_AEAD_CHACHA20_POLY1305)) { + capability_copy[dev_id][counter++] = + *capability; + } + } + dev_info->capabilities = + capability_copy[dev_id]; + } +} -void -rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) +void __vsym +rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info) { struct rte_cryptodev *dev; - if (dev_id >= cryptodev_globals.nb_devs) { + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); return; } @@ -1072,10 +1303,40 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); + if (capability_copy[dev_id] == NULL) { + if (!is_capability_checked[dev_id]) + get_v20_capabilities(dev_id, dev_info); + } else + dev_info->capabilities = capability_copy[dev_id]; + dev_info->driver_name = dev->device->driver->name; dev_info->device = dev->device; } +VERSION_SYMBOL(rte_cryptodev_info_get, _v20, 20.0); + +void __vsym +rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); + return; + } + dev = &rte_crypto_devices[dev_id]; + + memset(dev_info, 0, sizeof(struct rte_cryptodev_info)); + + RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); + (*dev->dev_ops->dev_infos_get)(dev, dev_info); + + dev_info->driver_name = dev->device->driver->name; + dev_info->device = dev->device; +} +MAP_STATIC_SYMBOL(void rte_cryptodev_info_get(uint8_t dev_id, + struct rte_cryptodev_info *dev_info), 
rte_cryptodev_info_get_v21); +BIND_DEFAULT_SYMBOL(rte_cryptodev_info_get, _v21, 21); int rte_cryptodev_callback_register(uint8_t dev_id, @@ -1201,9 +1462,14 @@ rte_cryptodev_sym_session_init(uint8_t dev_id, uint8_t index; int ret; + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } + dev = rte_cryptodev_pmd_get_dev(dev_id); - if (sess == NULL || xforms == NULL || dev == NULL) + if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL) return -EINVAL; if (mp->elt_size < sess_priv_sz) @@ -1215,7 +1481,7 @@ rte_cryptodev_sym_session_init(uint8_t dev_id, RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP); - if (sess->sess_data[index].data == NULL) { + if (sess->sess_data[index].refcnt == 0) { ret = dev->dev_ops->sym_session_configure(dev, xforms, sess, mp); if (ret < 0) { @@ -1226,10 +1492,12 @@ rte_cryptodev_sym_session_init(uint8_t dev_id, } } + rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp); + sess->sess_data[index].refcnt++; return 0; } -int __rte_experimental +int rte_cryptodev_asym_session_init(uint8_t dev_id, struct rte_cryptodev_asym_session *sess, struct rte_crypto_asym_xform *xforms, @@ -1239,6 +1507,11 @@ rte_cryptodev_asym_session_init(uint8_t dev_id, uint8_t index; int ret; + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } + dev = rte_cryptodev_pmd_get_dev(dev_id); if (sess == NULL || xforms == NULL || dev == NULL) @@ -1261,10 +1534,11 @@ rte_cryptodev_asym_session_init(uint8_t dev_id, } } + rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp); return 0; } -struct rte_mempool * __rte_experimental +struct rte_mempool * rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size, int socket_id) @@ -1301,13 +1575,36 @@ rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, pool_priv->nb_drivers = nb_drivers; pool_priv->user_data_sz = user_data_size; + rte_cryptodev_trace_sym_session_pool_create(name, nb_elts, + elt_size, cache_size, user_data_size, mp); return mp; } static unsigned int rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess) { - return (sizeof(sess->sess_data[0]) * sess->nb_drivers); + return (sizeof(sess->sess_data[0]) * sess->nb_drivers) + + sess->user_data_sz; +} + +static uint8_t +rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp) +{ + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; + + if (!mp) + return 0; + + pool_priv = rte_mempool_get_priv(mp); + + if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) || + pool_priv->nb_drivers != nb_drivers || + mp->elt_size < + rte_cryptodev_sym_get_header_session_size() + + pool_priv->user_data_sz) + return 0; + + return 1; } struct rte_cryptodev_sym_session * @@ -1316,18 +1613,13 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp) struct rte_cryptodev_sym_session *sess; struct rte_cryptodev_sym_session_pool_private_data *pool_priv; - if (!mp) { + if (!rte_cryptodev_sym_is_valid_session_pool(mp)) { CDEV_LOG_ERR("Invalid mempool\n"); return NULL; } pool_priv = rte_mempool_get_priv(mp); - if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) { - CDEV_LOG_ERR("Invalid mempool\n"); - return NULL; - } - /* Allocate a session structure from the session pool */ if (rte_mempool_get(mp, (void **)&sess)) { CDEV_LOG_ERR("couldn't get object from session mempool"); @@ -1335,7 
+1627,8 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp) } sess->nb_drivers = pool_priv->nb_drivers; - + sess->user_data_sz = pool_priv->user_data_sz; + sess->opaque_data = 0; /* Clear device session pointer. * Include the flag indicating presence of user data @@ -1343,13 +1636,28 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp) memset(sess->sess_data, 0, rte_cryptodev_sym_session_data_size(sess)); + rte_cryptodev_trace_sym_session_create(mp, sess); return sess; } -struct rte_cryptodev_asym_session * __rte_experimental +struct rte_cryptodev_asym_session * rte_cryptodev_asym_session_create(struct rte_mempool *mp) { struct rte_cryptodev_asym_session *sess; + unsigned int session_size = + rte_cryptodev_asym_get_header_session_size(); + + if (!mp) { + CDEV_LOG_ERR("invalid mempool\n"); + return NULL; + } + + /* Verify if provided mempool can hold elements big enough. */ + if (mp->elt_size < session_size) { + CDEV_LOG_ERR( + "mempool elements too small to hold session objects"); + return NULL; + } /* Allocate a session structure from the session pool */ if (rte_mempool_get(mp, (void **)&sess)) { @@ -1360,8 +1668,9 @@ rte_cryptodev_asym_session_create(struct rte_mempool *mp) /* Clear device session pointer. * Include the flag indicating presence of private data */ - memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t)); + memset(sess, 0, session_size); + rte_cryptodev_trace_asym_session_create(mp, sess); return sess; } @@ -1370,25 +1679,43 @@ rte_cryptodev_sym_session_clear(uint8_t dev_id, struct rte_cryptodev_sym_session *sess) { struct rte_cryptodev *dev; + uint8_t driver_id; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } dev = rte_cryptodev_pmd_get_dev(dev_id); if (dev == NULL || sess == NULL) return -EINVAL; + driver_id = dev->driver_id; + if (sess->sess_data[driver_id].refcnt == 0) + return 0; + if (--sess->sess_data[driver_id].refcnt != 0) + return -EBUSY; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP); dev->dev_ops->sym_session_clear(dev, sess); + rte_cryptodev_trace_sym_session_clear(dev_id, sess); return 0; } -int __rte_experimental +int rte_cryptodev_asym_session_clear(uint8_t dev_id, struct rte_cryptodev_asym_session *sess) { struct rte_cryptodev *dev; + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } + dev = rte_cryptodev_pmd_get_dev(dev_id); if (dev == NULL || sess == NULL) @@ -1398,6 +1725,7 @@ rte_cryptodev_asym_session_clear(uint8_t dev_id, dev->dev_ops->asym_session_clear(dev, sess); + rte_cryptodev_trace_sym_session_clear(dev_id, sess); return 0; } @@ -1405,16 +1733,14 @@ int rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess) { uint8_t i; - void *sess_priv; struct rte_mempool *sess_mp; if (sess == NULL) return -EINVAL; /* Check that all device private data has been freed */ - for (i = 0; i < nb_drivers; i++) { - sess_priv = get_sym_session_private_data(sess, i); - if (sess_priv != NULL) + for (i = 0; i < sess->nb_drivers; i++) { + if (sess->sess_data[i].refcnt != 0) return -EBUSY; } @@ -1422,10 +1748,11 @@ rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess) sess_mp = rte_mempool_from_obj(sess); rte_mempool_put(sess_mp, sess); + rte_cryptodev_trace_sym_session_free(sess); return 0; } -int __rte_experimental +int rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess) { uint8_t i; @@ -1446,6 +1773,7 @@ 
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess) sess_mp = rte_mempool_from_obj(sess); rte_mempool_put(sess_mp, sess); + rte_cryptodev_trace_asym_session_free(sess); return 0; } @@ -1465,7 +1793,7 @@ rte_cryptodev_sym_get_header_session_size(void) rte_cryptodev_sym_session_data_size(&s)); } -unsigned int __rte_experimental +unsigned int rte_cryptodev_sym_get_existing_header_session_size( struct rte_cryptodev_sym_session *sess) { @@ -1476,7 +1804,7 @@ rte_cryptodev_sym_get_existing_header_session_size( rte_cryptodev_sym_session_data_size(sess)); } -unsigned int __rte_experimental +unsigned int rte_cryptodev_asym_get_header_session_size(void) { /* @@ -1506,7 +1834,7 @@ rte_cryptodev_sym_get_private_session_size(uint8_t dev_id) return priv_sess_size; } -unsigned int __rte_experimental +unsigned int rte_cryptodev_asym_get_private_session_size(uint8_t dev_id) { struct rte_cryptodev *dev; @@ -1529,7 +1857,7 @@ rte_cryptodev_asym_get_private_session_size(uint8_t dev_id) } -int __rte_experimental +int rte_cryptodev_sym_session_set_user_data( struct rte_cryptodev_sym_session *sess, void *data, @@ -1538,20 +1866,54 @@ rte_cryptodev_sym_session_set_user_data( if (sess == NULL) return -EINVAL; + if (sess->user_data_sz < size) + return -ENOMEM; + rte_memcpy(sess->sess_data + sess->nb_drivers, data, size); return 0; } -void * __rte_experimental +void * rte_cryptodev_sym_session_get_user_data( struct rte_cryptodev_sym_session *sess) { - if (sess == NULL) + if (sess == NULL || sess->user_data_sz == 0) return NULL; return (void *)(sess->sess_data + sess->nb_drivers); } +static inline void +sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum) +{ + uint32_t i; + for (i = 0; i < vec->num; i++) + vec->status[i] = errnum; +} + +uint32_t +rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, + struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, + struct rte_crypto_sym_vec *vec) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + sym_crypto_fill_status(vec, EINVAL); + return 0; + } + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + if (*dev->dev_ops->sym_cpu_process == NULL || + !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) { + sym_crypto_fill_status(vec, ENOTSUP); + return 0; + } + + return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec); +} + /** Initialise rte_crypto_op mempool element */ static void rte_crypto_op_init(struct rte_mempool *mempool, @@ -1681,7 +2043,7 @@ rte_cryptodev_driver_id_get(const char *name) TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { driver_name = driver->driver->name; - if (strncmp(driver_name, name, strlen(driver_name)) == 0) + if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) return driver->id; } return -1; @@ -1690,8 +2052,14 @@ rte_cryptodev_driver_id_get(const char *name) const char * rte_cryptodev_name_get(uint8_t dev_id) { - struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id); + struct rte_cryptodev *dev; + if (!rte_cryptodev_is_valid_device_data(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return NULL; + } + + dev = rte_cryptodev_pmd_get_dev(dev_id); if (dev == NULL) return NULL;
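
For context, a minimal usage sketch of two of the interfaces touched by this change: the newly added rte_cryptodev_get_qp_status() helper and rte_cryptodev_info_get(), which this patch maps to its _v21 implementation by default. This is illustrative only and not part of the patch; the device id 0 and queue pair id 0 are assumptions for the example, and the application is assumed to have already probed and configured the device.

/*
 * Illustrative sketch only (not part of this patch): query device info
 * and check whether queue pair 0 has been set up on an already
 * configured crypto device. dev_id/qp_id values are assumed.
 */
#include <stdio.h>
#include <rte_cryptodev.h>

static void
dump_dev_and_qp0_state(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	int qp_state;

	/* Resolves to rte_cryptodev_info_get_v21() by default after this change */
	rte_cryptodev_info_get(dev_id, &info);
	printf("dev %u: driver %s, max %u queue pairs\n",
		(unsigned int)dev_id, info.driver_name,
		info.max_nb_queue_pairs);

	/* Returns 1 if the qp is initialised, 0 if not, negative on bad ids */
	qp_state = rte_cryptodev_get_qp_status(dev_id, 0);
	if (qp_state < 0)
		printf("dev %u or qp 0 is invalid\n", (unsigned int)dev_id);
	else
		printf("qp 0 on dev %u is %sinitialised\n",
			(unsigned int)dev_id, qp_state ? "" : "not ");
}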