X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Faesni_gcm%2Faesni_gcm_pmd_ops.c;h=0f315f008c8bb43923543974a9c6a6142e74e075;hb=2b252686bac0c3d8c7e2e64d0227ac5603a09a30;hp=f865e0dd6d0ec7d744b35640a7bd23371444b90c;hpb=eec136f3c54fcf1e585c3b96835be3dfa518c448;p=dpdk.git

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index f865e0dd6d..0f315f008c 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -38,9 +38,69 @@
 #include "aesni_gcm_pmd_private.h"
 
+static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
 /** Configure device */
 static int
-aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
 {
 	return 0;
 }
 
@@ -106,7 +166,9 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
 	struct aesni_gcm_private *internals = dev->data->dev_private;
 
 	if (dev_info != NULL) {
-		dev_info->dev_type = dev->dev_type;
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = aesni_gcm_pmd_capabilities;
 
 		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
 		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
@@ -133,7 +195,7 @@ aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
 			"aesni_gcm_pmd_%u_qp_%u",
 			dev->data->dev_id, qp->id);
 
-	if (n > sizeof(qp->name))
+	if (n >= sizeof(qp->name))
 		return -1;
 
 	return 0;
@@ -148,7 +210,7 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
 
 	r = rte_ring_lookup(qp->name);
 	if (r) {
-		if (r->prod.size >= ring_size) {
+		if (rte_ring_get_size(r) >= ring_size) {
 			GCM_LOG_INFO("Reusing existing ring %s for processed"
 					" packets", qp->name);
 			return r;
@@ -167,7 +229,7 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
 static int
 aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
+		int socket_id, struct rte_mempool *session_pool)
 {
 	struct aesni_gcm_qp *qp = NULL;
 	struct aesni_gcm_private *internals = dev->data->dev_private;
@@ -188,14 +250,14 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
 		goto qp_setup_cleanup;
 
-	qp->ops = &gcm_ops[internals->vector_mode];
+	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
 
 	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
 	if (qp->processed_pkts == NULL)
 		goto qp_setup_cleanup;
 
-	qp->sess_mp = dev->data->session_pool;
+	qp->sess_mp = session_pool;
 
 	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
 
@@ -239,32 +301,57 @@ aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 }
 
 /** Configure a aesni gcm session from a crypto xform chain */
-static void *
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_sym_xform *xform, void *sess)
+static int
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mempool)
 {
+	void *sess_private_data;
+	int ret;
 	struct aesni_gcm_private *internals = dev->data->dev_private;
 
 	if (unlikely(sess == NULL)) {
 		GCM_LOG_ERR("invalid session struct");
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
-			sess, xform) != 0) {
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CDEV_LOG_ERR(
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+			sess_private_data, xform);
+	if (ret != 0) {
 		GCM_LOG_ERR("failed configure session parameters");
-		return NULL;
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
 	}
 
-	return sess;
+	set_session_private_data(sess, dev->driver_id,
+		sess_private_data);
+
+	return 0;
 }
 
 /** Clear the memory of session so it doesn't leave key material behind */
 static void
-aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
 {
-	if (sess)
-		memset(sess, 0, sizeof(struct aesni_gcm_session));
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
 }
 
 struct rte_cryptodev_ops aesni_gcm_pmd_ops = {