X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_cryptodev%2Frte_cryptodev.c;h=ab4b23196209393c15e3d5f3b6789c62be4cf39a;hb=f5862ae99e058c0cee36a08dfd51f8a3b766999a;hp=2a95a351f0a3712f31461097b9ba08f61a979f56;hpb=2d96371fbd80d9163a75b7fba6750f89a708e0fb;p=dpdk.git diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c index 2a95a351f0..ab4b231962 100644 --- a/lib/librte_cryptodev/rte_cryptodev.c +++ b/lib/librte_cryptodev/rte_cryptodev.c @@ -43,19 +43,16 @@ static uint8_t nb_drivers; -struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS]; +static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS]; -struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0]; +struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices; static struct rte_cryptodev_global cryptodev_globals = { - .devs = &rte_crypto_devices[0], + .devs = rte_crypto_devices, .data = { NULL }, - .nb_devs = 0, - .max_devs = RTE_CRYPTO_MAX_DEVS + .nb_devs = 0 }; -struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals; - /* spinlock for crypto device callbacks */ static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER; @@ -166,6 +163,43 @@ rte_crypto_aead_operation_strings[] = { [RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt" }; +/** + * Asymmetric crypto transform operation strings identifiers. + */ +const char *rte_crypto_asym_xform_strings[] = { + [RTE_CRYPTO_ASYM_XFORM_NONE] = "none", + [RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa", + [RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp", + [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv", + [RTE_CRYPTO_ASYM_XFORM_DH] = "dh", + [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa", + [RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa", + [RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm", +}; + +/** + * Asymmetric crypto operation strings identifiers. + */ +const char *rte_crypto_asym_op_strings[] = { + [RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt", + [RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt", + [RTE_CRYPTO_ASYM_OP_SIGN] = "sign", + [RTE_CRYPTO_ASYM_OP_VERIFY] = "verify", + [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate", + [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate", + [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute", +}; + +/** + * The private data structure stored in the session mempool private data. + */ +struct rte_cryptodev_sym_session_pool_private_data { + uint16_t nb_drivers; + /**< number of elements in sess_data array */ + uint16_t user_data_sz; + /**< session user data will be placed after sess_data */ +}; + int rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, const char *algo_string) @@ -217,6 +251,24 @@ rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, return -1; } +int +rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, + const char *xform_string) +{ + unsigned int i; + + for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) { + if (strcmp(xform_string, + rte_crypto_asym_xform_strings[i]) == 0) { + *xform_enum = (enum rte_crypto_asym_xform_type) i; + return 0; + } + } + + /* Invalid string */ + return -1; +} + /** * The crypto auth operation strings identifiers. * It could be used in application command line. 
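The rte_cryptodev_asym_get_xform_enum() helper added above mirrors the existing cipher, auth and AEAD string lookups: it walks rte_crypto_asym_xform_strings[] from index 1 and reports an unknown name with -1. A minimal caller sketch follows; the helper name and the command-line context are illustrative assumptions, only the API call itself comes from this change.

#include <stdio.h>
#include <rte_cryptodev.h>

/* Hypothetical helper: map a user-supplied transform name such as
 * "modexp" or "rsa" onto the rte_crypto_asym_xform_type enum. */
static int
parse_asym_xform(const char *name, enum rte_crypto_asym_xform_type *xform)
{
        if (rte_cryptodev_asym_get_xform_enum(xform, name) != 0) {
                printf("unknown asymmetric xform \"%s\"\n", name);
                return -1;
        }

        return 0;
}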
@@ -262,19 +314,62 @@ rte_cryptodev_sym_capability_get(uint8_t dev_id, } -#define param_range_check(x, y) \ - (((x < y.min) || (x > y.max)) || \ - (y.increment != 0 && (x % y.increment) != 0)) +static int +param_range_check(uint16_t size, const struct rte_crypto_param_range *range) +{ + unsigned int next_size; + + /* Check lower/upper bounds */ + if (size < range->min) + return -1; + + if (size > range->max) + return -1; + + /* If range is actually only one value, size is correct */ + if (range->increment == 0) + return 0; + + /* Check if value is one of the supported sizes */ + for (next_size = range->min; next_size <= range->max; + next_size += range->increment) + if (size == next_size) + return 0; + + return -1; +} + +const struct rte_cryptodev_asymmetric_xform_capability * +rte_cryptodev_asym_capability_get(uint8_t dev_id, + const struct rte_cryptodev_asym_capability_idx *idx) +{ + const struct rte_cryptodev_capabilities *capability; + struct rte_cryptodev_info dev_info; + unsigned int i = 0; + + memset(&dev_info, 0, sizeof(struct rte_cryptodev_info)); + rte_cryptodev_info_get(dev_id, &dev_info); + + while ((capability = &dev_info.capabilities[i++])->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC) + continue; + + if (capability->asym.xform_capa.xform_type == idx->type) + return &capability->asym.xform_capa; + } + return NULL; +}; int rte_cryptodev_sym_capability_check_cipher( const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t iv_size) { - if (param_range_check(key_size, capability->cipher.key_size)) + if (param_range_check(key_size, &capability->cipher.key_size) != 0) return -1; - if (param_range_check(iv_size, capability->cipher.iv_size)) + if (param_range_check(iv_size, &capability->cipher.iv_size) != 0) return -1; return 0; @@ -285,13 +380,13 @@ rte_cryptodev_sym_capability_check_auth( const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t iv_size) { - if (param_range_check(key_size, capability->auth.key_size)) + if (param_range_check(key_size, &capability->auth.key_size) != 0) return -1; - if (param_range_check(digest_size, capability->auth.digest_size)) + if (param_range_check(digest_size, &capability->auth.digest_size) != 0) return -1; - if (param_range_check(iv_size, capability->auth.iv_size)) + if (param_range_check(iv_size, &capability->auth.iv_size) != 0) return -1; return 0; @@ -303,20 +398,56 @@ rte_cryptodev_sym_capability_check_aead( uint16_t key_size, uint16_t digest_size, uint16_t aad_size, uint16_t iv_size) { - if (param_range_check(key_size, capability->aead.key_size)) + if (param_range_check(key_size, &capability->aead.key_size) != 0) return -1; - if (param_range_check(digest_size, capability->aead.digest_size)) + if (param_range_check(digest_size, &capability->aead.digest_size) != 0) return -1; - if (param_range_check(aad_size, capability->aead.aad_size)) + if (param_range_check(aad_size, &capability->aead.aad_size) != 0) return -1; - if (param_range_check(iv_size, capability->aead.iv_size)) + if (param_range_check(iv_size, &capability->aead.iv_size) != 0) return -1; return 0; } +int +rte_cryptodev_asym_xform_capability_check_optype( + const struct rte_cryptodev_asymmetric_xform_capability *capability, + enum rte_crypto_asym_op_type op_type) +{ + if (capability->op_types & (1 << op_type)) + return 1; + + return 0; +} + +int +rte_cryptodev_asym_xform_capability_check_modlen( + const struct 
rte_cryptodev_asymmetric_xform_capability *capability, + uint16_t modlen) +{ + /* no need to check for limits, if min or max = 0 */ + if (capability->modlen.min != 0) { + if (modlen < capability->modlen.min) + return -1; + } + + if (capability->modlen.max != 0) { + if (modlen > capability->modlen.max) + return -1; + } + + /* in any case, check if given modlen is module increment */ + if (capability->modlen.increment != 0) { + if (modlen % (capability->modlen.increment)) + return -1; + } + + return 0; +} + const char * rte_cryptodev_get_feature_name(uint64_t flag) @@ -340,12 +471,28 @@ rte_cryptodev_get_feature_name(uint64_t flag) return "CPU_AESNI"; case RTE_CRYPTODEV_FF_HW_ACCELERATED: return "HW_ACCELERATED"; - case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER: - return "MBUF_SCATTER_GATHER"; + case RTE_CRYPTODEV_FF_IN_PLACE_SGL: + return "IN_PLACE_SGL"; + case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT: + return "OOP_SGL_IN_SGL_OUT"; + case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT: + return "OOP_SGL_IN_LB_OUT"; + case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT: + return "OOP_LB_IN_SGL_OUT"; + case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT: + return "OOP_LB_IN_LB_OUT"; case RTE_CRYPTODEV_FF_CPU_NEON: return "CPU_NEON"; case RTE_CRYPTODEV_FF_CPU_ARM_CE: return "CPU_ARM_CE"; + case RTE_CRYPTODEV_FF_SECURITY: + return "SECURITY_PROTOCOL"; + case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP: + return "RSA_PRIV_OP_KEY_EXP"; + case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT: + return "RSA_PRIV_OP_KEY_QT"; + case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED: + return "DIGEST_ENCRYPTED"; default: return NULL; } @@ -354,7 +501,7 @@ rte_cryptodev_get_feature_name(uint64_t flag) struct rte_cryptodev * rte_cryptodev_pmd_get_dev(uint8_t dev_id) { - return &rte_cryptodev_globals->devs[dev_id]; + return &cryptodev_globals.devs[dev_id]; } struct rte_cryptodev * @@ -366,8 +513,8 @@ rte_cryptodev_pmd_get_named_dev(const char *name) if (name == NULL) return NULL; - for (i = 0; i < rte_cryptodev_globals->max_devs; i++) { - dev = &rte_cryptodev_globals->devs[i]; + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { + dev = &cryptodev_globals.devs[i]; if ((dev->attached == RTE_CRYPTODEV_ATTACHED) && (strcmp(dev->data->name, name) == 0)) @@ -377,12 +524,21 @@ rte_cryptodev_pmd_get_named_dev(const char *name) return NULL; } +static inline uint8_t +rte_cryptodev_is_valid_device_data(uint8_t dev_id) +{ + if (rte_crypto_devices[dev_id].data == NULL) + return 0; + + return 1; +} + unsigned int rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id) { struct rte_cryptodev *dev = NULL; - if (dev_id >= rte_cryptodev_globals->nb_devs) + if (!rte_cryptodev_is_valid_device_data(dev_id)) return 0; dev = rte_cryptodev_pmd_get_dev(dev_id); @@ -401,12 +557,15 @@ rte_cryptodev_get_dev_id(const char *name) if (name == NULL) return -1; - for (i = 0; i < rte_cryptodev_globals->nb_devs; i++) - if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name) + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { + if (!rte_cryptodev_is_valid_device_data(i)) + continue; + if ((strcmp(cryptodev_globals.devs[i].data->name, name) == 0) && - (rte_cryptodev_globals->devs[i].attached == + (cryptodev_globals.devs[i].attached == RTE_CRYPTODEV_ATTACHED)) return i; + } return -1; } @@ -414,7 +573,7 @@ rte_cryptodev_get_dev_id(const char *name) uint8_t rte_cryptodev_count(void) { - return rte_cryptodev_globals->nb_devs; + return cryptodev_globals.nb_devs; } uint8_t @@ -422,9 +581,9 @@ rte_cryptodev_device_count_by_driver(uint8_t driver_id) { uint8_t i, dev_count = 0; - for (i = 0; i < rte_cryptodev_globals->max_devs; i++) - if 
(rte_cryptodev_globals->devs[i].driver_id == driver_id && - rte_cryptodev_globals->devs[i].attached == + for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) + if (cryptodev_globals.devs[i].driver_id == driver_id && + cryptodev_globals.devs[i].attached == RTE_CRYPTODEV_ATTACHED) dev_count++; @@ -436,17 +595,18 @@ rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices) { uint8_t i, count = 0; - struct rte_cryptodev *devs = rte_cryptodev_globals->devs; - uint8_t max_devs = rte_cryptodev_globals->max_devs; + struct rte_cryptodev *devs = cryptodev_globals.devs; - for (i = 0; i < max_devs && count < nb_devices; i++) { + for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) { + if (!rte_cryptodev_is_valid_device_data(i)) + continue; if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) { int cmp; cmp = strncmp(devs[i].device->driver->name, driver_name, - strlen(driver_name)); + strlen(driver_name) + 1); if (cmp == 0) devices[count++] = devs[i].data->dev_id; @@ -483,7 +643,7 @@ static inline int rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, int socket_id) { - char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + char mz_name[RTE_MEMZONE_NAMESIZE]; const struct rte_memzone *mz; int n; @@ -509,6 +669,31 @@ rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, return 0; } +static inline int +rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + int n; + + /* generate memzone name */ + n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id); + if (n >= (int)sizeof(mz_name)) + return -EINVAL; + + mz = rte_memzone_lookup(mz_name); + if (mz == NULL) + return -ENOMEM; + + RTE_ASSERT(*data == mz->addr); + *data = NULL; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + return rte_memzone_free(mz); + + return 0; +} + static uint8_t rte_cryptodev_find_free_device_index(void) { @@ -543,23 +728,25 @@ rte_cryptodev_pmd_allocate(const char *name, int socket_id) cryptodev = rte_cryptodev_pmd_get_dev(dev_id); if (cryptodev->data == NULL) { - struct rte_cryptodev_data *cryptodev_data = - cryptodev_globals.data[dev_id]; + struct rte_cryptodev_data **cryptodev_data = + &cryptodev_globals.data[dev_id]; - int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data, + int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data, socket_id); - if (retval < 0 || cryptodev_data == NULL) + if (retval < 0 || *cryptodev_data == NULL) return NULL; - cryptodev->data = cryptodev_data; + cryptodev->data = *cryptodev_data; - snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN, - "%s", name); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + strlcpy(cryptodev->data->name, name, + RTE_CRYPTODEV_NAME_MAX_LEN); - cryptodev->data->dev_id = dev_id; - cryptodev->data->socket_id = socket_id; - cryptodev->data->dev_started = 0; + cryptodev->data->dev_id = dev_id; + cryptodev->data->socket_id = socket_id; + cryptodev->data->dev_started = 0; + } /* init user callbacks */ TAILQ_INIT(&(cryptodev->link_intr_cbs)); @@ -576,17 +763,24 @@ int rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev) { int ret; + uint8_t dev_id; if (cryptodev == NULL) return -EINVAL; + dev_id = cryptodev->data->dev_id; + /* Close device only if device operations have been set */ if (cryptodev->dev_ops) { - ret = rte_cryptodev_close(cryptodev->data->dev_id); + ret = rte_cryptodev_close(dev_id); if (ret < 0) return ret; } + ret = rte_cryptodev_data_free(dev_id, 
&cryptodev_globals.data[dev_id]); + if (ret < 0) + return ret; + cryptodev->attached = RTE_CRYPTODEV_DETACHED; cryptodev_globals.nb_devs--; return 0; @@ -679,50 +873,6 @@ rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs, return 0; } -int -rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id) -{ - struct rte_cryptodev *dev; - - if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); - return -EINVAL; - } - - dev = &rte_crypto_devices[dev_id]; - if (queue_pair_id >= dev->data->nb_queue_pairs) { - CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP); - - return dev->dev_ops->queue_pair_start(dev, queue_pair_id); - -} - -int -rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id) -{ - struct rte_cryptodev *dev; - - if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); - return -EINVAL; - } - - dev = &rte_crypto_devices[dev_id]; - if (queue_pair_id >= dev->data->nb_queue_pairs) { - CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP); - - return dev->dev_ops->queue_pair_stop(dev, queue_pair_id); - -} - int rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) { @@ -855,8 +1005,7 @@ rte_cryptodev_close(uint8_t dev_id) int rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, - const struct rte_cryptodev_qp_conf *qp_conf, int socket_id, - struct rte_mempool *session_pool) + const struct rte_cryptodev_qp_conf *qp_conf, int socket_id) { struct rte_cryptodev *dev; @@ -872,6 +1021,42 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, return -EINVAL; } + if (!qp_conf) { + CDEV_LOG_ERR("qp_conf cannot be NULL\n"); + return -EINVAL; + } + + if ((qp_conf->mp_session && !qp_conf->mp_session_private) || + (!qp_conf->mp_session && qp_conf->mp_session_private)) { + CDEV_LOG_ERR("Invalid mempools\n"); + return -EINVAL; + } + + if (qp_conf->mp_session) { + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; + uint32_t obj_size = qp_conf->mp_session->elt_size; + uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size; + struct rte_cryptodev_sym_session s = {0}; + + pool_priv = rte_mempool_get_priv(qp_conf->mp_session); + if (!pool_priv || qp_conf->mp_session->private_data_size < + sizeof(*pool_priv)) { + CDEV_LOG_ERR("Invalid mempool\n"); + return -EINVAL; + } + + s.nb_drivers = pool_priv->nb_drivers; + s.user_data_sz = pool_priv->user_data_sz; + + if ((rte_cryptodev_sym_get_existing_header_session_size(&s) > + obj_size) || (s.nb_drivers <= dev->driver_id) || + rte_cryptodev_sym_get_private_session_size(dev_id) > + obj_priv_size) { + CDEV_LOG_ERR("Invalid mempool\n"); + return -EINVAL; + } + } + if (dev->data->dev_started) { CDEV_LOG_ERR( "device %d must be stopped to allow configuration", dev_id); @@ -881,7 +1066,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP); return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf, - socket_id, session_pool); + socket_id); } @@ -930,7 +1115,7 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) { struct rte_cryptodev *dev; - if (dev_id >= cryptodev_globals.nb_devs) { + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { 
CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); return; } @@ -943,6 +1128,7 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) (*dev->dev_ops->dev_infos_get)(dev, dev_info); dev_info->driver_name = dev->device->driver->name; + dev_info->device = dev->device; } @@ -1065,6 +1251,8 @@ rte_cryptodev_sym_session_init(uint8_t dev_id, struct rte_mempool *mp) { struct rte_cryptodev *dev; + uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size( + dev_id); uint8_t index; int ret; @@ -1073,10 +1261,54 @@ rte_cryptodev_sym_session_init(uint8_t dev_id, if (sess == NULL || xforms == NULL || dev == NULL) return -EINVAL; + if (mp->elt_size < sess_priv_sz) + return -EINVAL; + index = dev->driver_id; + if (index >= sess->nb_drivers) + return -EINVAL; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP); + + if (sess->sess_data[index].refcnt == 0) { + ret = dev->dev_ops->sym_session_configure(dev, xforms, + sess, mp); + if (ret < 0) { + CDEV_LOG_ERR( + "dev_id %d failed to configure session details", + dev_id); + return ret; + } + } + + sess->sess_data[index].refcnt++; + return 0; +} + +int +rte_cryptodev_asym_session_init(uint8_t dev_id, + struct rte_cryptodev_asym_session *sess, + struct rte_crypto_asym_xform *xforms, + struct rte_mempool *mp) +{ + struct rte_cryptodev *dev; + uint8_t index; + int ret; + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + if (sess == NULL || xforms == NULL || dev == NULL) + return -EINVAL; + + index = dev->driver_id; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, + -ENOTSUP); if (sess->sess_private_data[index] == NULL) { - ret = dev->dev_ops->session_configure(dev, xforms, sess, mp); + ret = dev->dev_ops->asym_session_configure(dev, + xforms, + sess, mp); if (ret < 0) { CDEV_LOG_ERR( "dev_id %d failed to configure session details", @@ -1088,10 +1320,94 @@ rte_cryptodev_sym_session_init(uint8_t dev_id, return 0; } +struct rte_mempool * +rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, + uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size, + int socket_id) +{ + struct rte_mempool *mp; + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; + uint32_t obj_sz; + + obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size; + if (obj_sz > elt_size) + CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size, + obj_sz); + else + obj_sz = elt_size; + + mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size, + (uint32_t)(sizeof(*pool_priv)), + NULL, NULL, NULL, NULL, + socket_id, 0); + if (mp == NULL) { + CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", + __func__, name, rte_errno); + return NULL; + } + + pool_priv = rte_mempool_get_priv(mp); + if (!pool_priv) { + CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", + __func__, name); + rte_mempool_free(mp); + return NULL; + } + + pool_priv->nb_drivers = nb_drivers; + pool_priv->user_data_sz = user_data_size; + + return mp; +} + +static unsigned int +rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess) +{ + return (sizeof(sess->sess_data[0]) * sess->nb_drivers) + + sess->user_data_sz; +} + struct rte_cryptodev_sym_session * rte_cryptodev_sym_session_create(struct rte_mempool *mp) { struct rte_cryptodev_sym_session *sess; + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; + + if (!mp) { + CDEV_LOG_ERR("Invalid mempool\n"); + return NULL; + } + + pool_priv = rte_mempool_get_priv(mp); + + if (!pool_priv || mp->private_data_size < sizeof(*pool_priv)) { + 
CDEV_LOG_ERR("Invalid mempool\n"); + return NULL; + } + + /* Allocate a session structure from the session pool */ + if (rte_mempool_get(mp, (void **)&sess)) { + CDEV_LOG_ERR("couldn't get object from session mempool"); + return NULL; + } + + sess->nb_drivers = pool_priv->nb_drivers; + sess->user_data_sz = pool_priv->user_data_sz; + sess->opaque_data = 0; + + /* Clear device session pointer. + * Include the flag indicating presence of user data + */ + memset(sess->sess_data, 0, + rte_cryptodev_sym_session_data_size(sess)); + + return sess; +} + +struct rte_cryptodev_asym_session * +rte_cryptodev_asym_session_create(struct rte_mempool *mp) +{ + struct rte_cryptodev_asym_session *sess; /* Allocate a session structure from the session pool */ if (rte_mempool_get(mp, (void **)&sess)) { @@ -1108,77 +1424,72 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp) } int -rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id, +rte_cryptodev_sym_session_clear(uint8_t dev_id, struct rte_cryptodev_sym_session *sess) { struct rte_cryptodev *dev; + uint8_t driver_id; - if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); - return -EINVAL; - } + dev = rte_cryptodev_pmd_get_dev(dev_id); - dev = &rte_crypto_devices[dev_id]; + if (dev == NULL || sess == NULL) + return -EINVAL; - /* The API is optional, not returning error if driver do not suuport */ - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0); + driver_id = dev->driver_id; + if (sess->sess_data[driver_id].refcnt == 0) + return 0; + if (--sess->sess_data[driver_id].refcnt != 0) + return -EBUSY; - void *sess_priv = get_session_private_data(sess, dev->driver_id); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP); - if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) { - CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session", - dev_id, qp_id); - return -EPERM; - } + dev->dev_ops->sym_session_clear(dev, sess); return 0; } int -rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id, - struct rte_cryptodev_sym_session *sess) +rte_cryptodev_asym_session_clear(uint8_t dev_id, + struct rte_cryptodev_asym_session *sess) { struct rte_cryptodev *dev; - if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { - CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); - return -EINVAL; - } - - dev = &rte_crypto_devices[dev_id]; + dev = rte_cryptodev_pmd_get_dev(dev_id); - /* The API is optional, not returning error if driver do not suuport */ - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0); + if (dev == NULL || sess == NULL) + return -EINVAL; - void *sess_priv = get_session_private_data(sess, dev->driver_id); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP); - if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) { - CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session", - dev_id, qp_id); - return -EPERM; - } + dev->dev_ops->asym_session_clear(dev, sess); return 0; } int -rte_cryptodev_sym_session_clear(uint8_t dev_id, - struct rte_cryptodev_sym_session *sess) +rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess) { - struct rte_cryptodev *dev; - - dev = rte_cryptodev_pmd_get_dev(dev_id); + uint8_t i; + struct rte_mempool *sess_mp; - if (dev == NULL || sess == NULL) + if (sess == NULL) return -EINVAL; - dev->dev_ops->session_clear(dev, sess); + /* Check that all device private data has been freed */ + for (i = 0; i < sess->nb_drivers; i++) { + if (sess->sess_data[i].refcnt != 0) + return 
-EBUSY; + } + + /* Return session to mempool */ + sess_mp = rte_mempool_from_obj(sess); + rte_mempool_put(sess_mp, sess); return 0; } int -rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess) +rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess) { uint8_t i; void *sess_priv; @@ -1189,7 +1500,7 @@ rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess) /* Check that all device private data has been freed */ for (i = 0; i < nb_drivers; i++) { - sess_priv = get_session_private_data(sess, i); + sess_priv = get_asym_session_private_data(sess, i); if (sess_priv != NULL) return -EBUSY; } @@ -1202,7 +1513,34 @@ rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess) } unsigned int -rte_cryptodev_get_header_session_size(void) +rte_cryptodev_sym_get_header_session_size(void) +{ + /* + * Header contains pointers to the private data of all registered + * drivers and all necessary information to ensure safely clear + * or free al session. + */ + struct rte_cryptodev_sym_session s = {0}; + + s.nb_drivers = nb_drivers; + + return (unsigned int)(sizeof(s) + + rte_cryptodev_sym_session_data_size(&s)); +} + +unsigned int +rte_cryptodev_sym_get_existing_header_session_size( + struct rte_cryptodev_sym_session *sess) +{ + if (!sess) + return 0; + else + return (unsigned int)(sizeof(*sess) + + rte_cryptodev_sym_session_data_size(sess)); +} + +unsigned int +rte_cryptodev_asym_get_header_session_size(void) { /* * Header contains pointers to the private data @@ -1213,10 +1551,9 @@ rte_cryptodev_get_header_session_size(void) } unsigned int -rte_cryptodev_get_private_session_size(uint8_t dev_id) +rte_cryptodev_sym_get_private_session_size(uint8_t dev_id) { struct rte_cryptodev *dev; - unsigned int header_size = sizeof(void *) * nb_drivers; unsigned int priv_sess_size; if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) @@ -1224,16 +1561,30 @@ rte_cryptodev_get_private_session_size(uint8_t dev_id) dev = rte_cryptodev_pmd_get_dev(dev_id); - if (*dev->dev_ops->session_get_size == NULL) + if (*dev->dev_ops->sym_session_get_size == NULL) return 0; - priv_sess_size = (*dev->dev_ops->session_get_size)(dev); + priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev); - /* - * If size is less than session header size, - * return the latter, as this guarantees that - * sessionless operations will work - */ + return priv_sess_size; +} + +unsigned int +rte_cryptodev_asym_get_private_session_size(uint8_t dev_id) +{ + struct rte_cryptodev *dev; + unsigned int header_size = sizeof(void *) * nb_drivers; + unsigned int priv_sess_size; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) + return 0; + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + if (*dev->dev_ops->asym_session_get_size == NULL) + return 0; + + priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev); if (priv_sess_size < header_size) return header_size; @@ -1241,36 +1592,30 @@ rte_cryptodev_get_private_session_size(uint8_t dev_id) } -int __rte_experimental -rte_cryptodev_sym_session_set_private_data( +int +rte_cryptodev_sym_session_set_user_data( struct rte_cryptodev_sym_session *sess, void *data, uint16_t size) { - uint16_t off_set = sizeof(void *) * nb_drivers; - uint8_t *private_data_present = (uint8_t *)sess + off_set; - if (sess == NULL) return -EINVAL; - *private_data_present = 1; - off_set += sizeof(uint8_t); - rte_memcpy((uint8_t *)sess + off_set, data, size); + if (sess->user_data_sz < size) + return -ENOMEM; + + rte_memcpy(sess->sess_data + sess->nb_drivers, data, size); return 0; } -void * 
__rte_experimental -rte_cryptodev_sym_session_get_private_data( +void * +rte_cryptodev_sym_session_get_user_data( struct rte_cryptodev_sym_session *sess) { - uint16_t off_set = sizeof(void *) * nb_drivers; - uint8_t *private_data_present = (uint8_t *)sess + off_set; - - if (sess == NULL || !*private_data_present) + if (sess == NULL || sess->user_data_sz == 0) return NULL; - off_set += sizeof(uint8_t); - return (uint8_t *)sess + off_set; + return (void *)(sess->sess_data + sess->nb_drivers); } /** Initialise rte_crypto_op mempool element */ @@ -1300,9 +1645,20 @@ rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type, struct rte_crypto_op_pool_private *priv; unsigned elt_size = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op) + priv_size; + if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { + elt_size += sizeof(struct rte_crypto_sym_op); + } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { + elt_size += sizeof(struct rte_crypto_asym_op); + } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) { + elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op), + sizeof(struct rte_crypto_asym_op)); + } else { + CDEV_LOG_ERR("Invalid op_type\n"); + return NULL; + } + /* lookup mempool in case already allocated */ struct rte_mempool *mp = rte_mempool_lookup(name); @@ -1391,7 +1747,7 @@ rte_cryptodev_driver_id_get(const char *name) TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { driver_name = driver->driver->name; - if (strncmp(driver_name, name, strlen(driver_name)) == 0) + if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) return driver->id; } return -1;
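Taken together, the queue-pair changes above move the session mempools into struct rte_cryptodev_qp_conf (mp_session for the session headers, mp_session_private for the PMD private data) and add rte_cryptodev_sym_session_pool_create(), which sizes the header pool from the number of registered drivers plus an optional user-data area. The sketch below shows the resulting setup flow under stated assumptions: device id 0, the pool sizes, the nb_descriptors value and the helper name are illustrative, and the device is assumed to have already been configured with rte_cryptodev_configure() and to be stopped.

#include <rte_cryptodev.h>
#include <rte_mempool.h>

#define NB_SESSIONS  2048   /* assumed pool size */
#define CACHE_SIZE   64     /* assumed per-lcore cache */
#define USER_DATA_SZ 16     /* assumed per-session user data */

static int
setup_qp_with_session_pools(uint8_t dev_id, int socket_id)
{
        struct rte_cryptodev_qp_conf qp_conf = { 0 };
        struct rte_mempool *sess_mp, *sess_priv_mp;
        uint32_t priv_sz;

        /* Header pool: passing elt_size 0 lets the API expand it to the
         * header size derived from the registered drivers plus the
         * requested user-data area. */
        sess_mp = rte_cryptodev_sym_session_pool_create("sess_hdr_pool",
                        NB_SESSIONS, 0, CACHE_SIZE, USER_DATA_SZ,
                        socket_id);
        if (sess_mp == NULL)
                return -1;

        /* Private-data pool: each element must cover the PMD's private
         * session size, which queue_pair_setup() now validates. */
        priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
        sess_priv_mp = rte_mempool_create("sess_priv_pool", NB_SESSIONS,
                        priv_sz, CACHE_SIZE, 0, NULL, NULL, NULL, NULL,
                        socket_id, 0);
        if (sess_priv_mp == NULL) {
                rte_mempool_free(sess_mp);
                return -1;
        }

        /* Both mempools travel in qp_conf; supplying only one of them is
         * rejected by the checks added in rte_cryptodev_queue_pair_setup(). */
        qp_conf.nb_descriptors = 2048;
        qp_conf.mp_session = sess_mp;
        qp_conf.mp_session_private = sess_priv_mp;

        return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
                        socket_id);
}

Splitting the header pool from the private-data pool lets the header size track the number of registered crypto drivers, while each device's private pool is sized from rte_cryptodev_sym_get_private_session_size(); this is exactly the relationship that the mempool validation added to rte_cryptodev_queue_pair_setup() enforces.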