/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
*/
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
static uint8_t cryptodev_driver_id;
+/* setup session handlers */
+static void
+set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
+{
+ s->ops.pre = gcm_ops->pre;
+ s->ops.init = gcm_ops->init;
+
+ switch (s->op) {
+ case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+ s->ops.cipher = gcm_ops->enc;
+ s->ops.update = gcm_ops->update_enc;
+ s->ops.finalize = gcm_ops->finalize_enc;
+ break;
+ case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+ s->ops.cipher = gcm_ops->dec;
+ s->ops.update = gcm_ops->update_dec;
+ s->ops.finalize = gcm_ops->finalize_dec;
+ break;
+ case AESNI_GMAC_OP_GENERATE:
+ case AESNI_GMAC_OP_VERIFY:
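+ /* GMAC passes the data through init() as AAD, so only finalize is set here */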
+ s->ops.finalize = gcm_ops->finalize_enc;
+ break;
+ }
+}
+
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
/* Select Crypto operation */
if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ /* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
else
sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
return -ENOTSUP;
}
-
/* IV check */
if (sess->iv.length != 16 && sess->iv.length != 12 &&
sess->iv.length != 0) {
return -EINVAL;
}
+ /* setup session handlers */
+ set_func_ops(sess, &gcm_ops[sess->key]);
+
+ /* pre-generate key */
gcm_ops[sess->key].pre(key, &sess->gdata_key);
/* Digest check */
&qp->gdata_ctx,
tag,
session->gen_digest_length);
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
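+ /* intel-ipsec-mb > 0.54 provides a dedicated GMAC API; use it instead of the GCM init/update calls */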
+ } else if (session->op == AESNI_GMAC_OP_GENERATE) {
+ qp->ops[session->key].gmac_init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ session->iv.length);
+
+ qp->ops[session->key].gmac_update(&session->gdata_key,
+ &qp->gdata_ctx, src,
+ (uint64_t)part_len);
+ total_len = data_length - part_len;
+
+ while (total_len) {
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ qp->ops[session->key].gmac_update(&session->gdata_key,
+ &qp->gdata_ctx, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ if (session->req_digest_length != session->gen_digest_length)
+ tag = qp->temp_digest;
+ else
+ tag = sym_op->auth.digest.data;
+
+ qp->ops[session->key].gmac_finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ tag,
+ session->gen_digest_length);
+ } else { /* AESNI_GMAC_OP_VERIFY */
+ qp->ops[session->key].gmac_init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ session->iv.length);
+
+ qp->ops[session->key].gmac_update(&session->gdata_key,
+ &qp->gdata_ctx, src,
+ (uint64_t)part_len);
+ total_len = data_length - part_len;
+
+ while (total_len) {
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ qp->ops[session->key].gmac_update(&session->gdata_key,
+ &qp->gdata_ctx, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
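+ /* compute the digest into the scratch buffer for comparison against the expected tag */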
+ tag = qp->temp_digest;
+
+ qp->ops[session->key].gmac_finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ tag,
+ session->gen_digest_length);
+ }
+#else
} else if (session->op == AESNI_GMAC_OP_GENERATE) {
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
tag,
session->gen_digest_length);
}
+#endif
+
+ return 0;
+}
+
+static inline void
+aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
+{
+ uint32_t i;
+
+ for (i = 0; i < vec->num; i++)
+ vec->status[i] = errnum;
+}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
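+ /* generate the full digest in a scratch buffer when a truncated tag was requested */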
+ if (s->req_digest_length != s->gen_digest_length) {
+ uint8_t tmpdigest[s->gen_digest_length];
+
+ s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+ s->gen_digest_length);
+ memcpy(digest, tmpdigest, s->req_digest_length);
+ } else {
+ s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
+ s->gen_digest_length);
+ }
return 0;
}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
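+ /* regenerate the full digest and compare only the requested number of bytes */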
+ uint8_t tmpdigest[s->gen_digest_length];
+
+ s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+ s->gen_digest_length);
+
+ return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
+ EBADMSG;
+}
+
+static inline void
+aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
+ void *iv, void *aad)
+{
+ uint32_t i;
+
+ /* init crypto operation */
+ s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
+ (uint64_t)s->aad_length);
+
+ /* update with sgl data */
+ for (i = 0; i < sgl->num; i++) {
+ struct rte_crypto_vec *vec = &sgl->vec[i];
+
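+ /* in-place operation: the source buffer doubles as the destination */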
+ s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
+ vec->len);
+ }
+}
+
+static inline void
+aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
+ void *iv)
+{
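+ /* GMAC is authentication-only: the data is fed to the GCM init as AAD */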
+ s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
+ sgl->vec[0].len);
+}
+
+static inline uint32_t
+aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+ uint32_t i, processed;
+
+ processed = 0;
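+ /* each buffer is encrypted and tagged independently; failures are reported per buffer in vec->status */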
+ for (i = 0; i < vec->num; ++i) {
+ aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
+ &vec->src_sgl[i], vec->iv[i].va,
+ vec->aad[i].va);
+ vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
+ gdata_ctx, vec->digest[i].va);
+ processed += (vec->status[i] == 0);
+ }
+
+ return processed;
+}
+
+static inline uint32_t
+aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+ uint32_t i, processed;
+
+ processed = 0;
+ for (i = 0; i < vec->num; ++i) {
+ aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
+ &vec->src_sgl[i], vec->iv[i].va,
+ vec->aad[i].va);
+ vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
+ gdata_ctx, vec->digest[i].va);
+ processed += (vec->status[i] == 0);
+ }
+
+ return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+ uint32_t i, processed;
+
+ processed = 0;
+ for (i = 0; i < vec->num; ++i) {
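+ /* the data goes through init() as AAD, which cannot be chained across segments */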
+ if (vec->src_sgl[i].num != 1) {
+ vec->status[i] = ENOTSUP;
+ continue;
+ }
+
+ aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
+ &vec->src_sgl[i], vec->iv[i].va);
+ vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
+ gdata_ctx, vec->digest[i].va);
+ processed += (vec->status[i] == 0);
+ }
+
+ return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+ uint32_t i, processed;
+
+ processed = 0;
+ for (i = 0; i < vec->num; ++i) {
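+ /* same single-segment restriction as the generate path */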
+ if (vec->src_sgl[i].num != 1) {
+ vec->status[i] = ENOTSUP;
+ continue;
+ }
+
+ aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
+ &vec->src_sgl[i], vec->iv[i].va);
+ vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
+ gdata_ctx, vec->digest[i].va);
+ processed += (vec->status[i] == 0);
+ }
+
+ return processed;
+}
+
+/** Process CPU crypto bulk operations */
+uint32_t
+aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess,
+ __rte_unused union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_sym_vec *vec)
+{
+ void *sess_priv;
+ struct aesni_gcm_session *s;
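+ /* CPU crypto completes synchronously, so a per-call context on the stack suffices */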
+ struct gcm_context_data gdata_ctx;
+
+ sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+ if (unlikely(sess_priv == NULL)) {
+ aesni_gcm_fill_error_code(vec, EINVAL);
+ return 0;
+ }
+
+ s = sess_priv;
+ switch (s->op) {
+ case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+ return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
+ case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+ return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
+ case AESNI_GMAC_OP_GENERATE:
+ return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
+ case AESNI_GMAC_OP_VERIFY:
+ return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
+ default:
+ aesni_gcm_fill_error_code(vec, EINVAL);
+ return 0;
+ }
+}
+
/**
 * Process a completed job and return the rte_mbuf that the job processed
*
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_IN_PLACE_SGL |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
- RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
/* Check CPU for support for AES instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
init_mb_mgr_avx2(mb_mgr);
break;
case RTE_AESNI_GCM_AVX512:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
- init_mb_mgr_avx512(mb_mgr);
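+ /* the AVX512 code path is only used when VAES is available; otherwise fall back to AVX2 */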
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_VAES)) {
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ init_mb_mgr_avx512(mb_mgr);
+ } else {
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ init_mb_mgr_avx2(mb_mgr);
+ vector_mode = RTE_AESNI_GCM_AVX2;
+ }
break;
default:
AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
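+ /* dedicated GMAC handlers are only exposed by intel-ipsec-mb > 0.54 */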
+ internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+ internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+ internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+#endif
internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+ internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+ internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+ internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+#endif
internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+ internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+ internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+ internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+#endif
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
cryptodev_driver_id);
-
-
-RTE_INIT(aesni_gcm_init_log)
-{
- aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
-}
+RTE_LOG_REGISTER_DEFAULT(aesni_gcm_logtype_driver, NOTICE);