X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Faesni_gcm%2Faesni_gcm_pmd.c;h=1d2a0ce00750bb6a014f0a8281f74055c272eba6;hb=0136dd3727165dcf08bb8a654548707577956a47;hp=ebdf7c35a8e487fcd43cdad7da313981f332f646;hpb=ceb863938708d3e38414650827a8cf433d42e035;p=dpdk.git

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index ebdf7c35a8..1d2a0ce007 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -15,6 +15,31 @@
 
 static uint8_t cryptodev_driver_id;
 
+/* setup session handlers */
+static void
+set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
+{
+	s->ops.pre = gcm_ops->pre;
+	s->ops.init = gcm_ops->init;
+
+	switch (s->op) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		s->ops.cipher = gcm_ops->enc;
+		s->ops.update = gcm_ops->update_enc;
+		s->ops.finalize = gcm_ops->finalize_enc;
+		break;
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		s->ops.cipher = gcm_ops->dec;
+		s->ops.update = gcm_ops->update_dec;
+		s->ops.finalize = gcm_ops->finalize_dec;
+		break;
+	case AESNI_GMAC_OP_GENERATE:
+	case AESNI_GMAC_OP_VERIFY:
+		s->ops.finalize = gcm_ops->finalize_enc;
+		break;
+	}
+}
+
 /** Parse crypto xform chain and set private session parameters */
 int
 aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
@@ -24,7 +49,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 	const struct rte_crypto_sym_xform *auth_xform;
 	const struct rte_crypto_sym_xform *aead_xform;
 	uint8_t key_length;
-	uint8_t *key;
+	const uint8_t *key;
 
 	/* AES-GMAC */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
@@ -65,6 +90,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		/* Select Crypto operation */
 		if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
 			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+		/* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
 		else
 			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
 
@@ -78,7 +104,6 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		return -ENOTSUP;
 	}
 
-
 	/* IV check */
 	if (sess->iv.length != 16 && sess->iv.length != 12 &&
 			sess->iv.length != 0) {
@@ -89,20 +114,24 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 
 	/* Check key length and calculate GCM pre-compute. */
 	switch (key_length) {
 	case 16:
-		sess->key = AESNI_GCM_KEY_128;
+		sess->key = GCM_KEY_128;
 		break;
 	case 24:
-		sess->key = AESNI_GCM_KEY_192;
+		sess->key = GCM_KEY_192;
 		break;
 	case 32:
-		sess->key = AESNI_GCM_KEY_256;
+		sess->key = GCM_KEY_256;
 		break;
 	default:
 		AESNI_GCM_LOG(ERR, "Invalid key length");
 		return -EINVAL;
 	}
 
-	gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+	/* setup session handlers */
+	set_func_ops(sess, &gcm_ops[sess->key]);
+
+	/* pre-generate key */
+	gcm_ops[sess->key].pre(key, &sess->gdata_key);
 
 	/* Digest check */
 	if (sess->req_digest_length > 16) {
@@ -151,7 +180,8 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
 			return NULL;
 
-		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+		if (rte_mempool_get(qp->sess_mp_priv,
+				(void **)&_sess_private_data))
 			return NULL;
 
 		sess = (struct aesni_gcm_session *)_sess_private_data;
@@ -159,7 +189,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
 				sess, sym_op->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
 			sess = NULL;
 		}
 		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
@@ -195,6 +225,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 	uint32_t offset, data_offset, data_length;
 	uint32_t part_len, total_len, data_len;
 	uint8_t *tag;
+	unsigned int oop = 0;
 
 	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
 			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
@@ -216,27 +247,28 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		RTE_ASSERT(m_src != NULL);
 	}
 
+	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
 	data_len = m_src->data_len - offset;
 	part_len = (data_len < data_length) ? data_len :
 			data_length;
 
-	/* Destination buffer is required when segmented source buffer */
-	RTE_ASSERT((part_len == data_length) ||
-			((part_len != data_length) &&
-					(sym_op->m_dst != NULL)));
-
 	/* Segmented destination buffer is not supported */
 	RTE_ASSERT((sym_op->m_dst == NULL) ||
 			((sym_op->m_dst != NULL) &&
 					rte_pktmbuf_is_contiguous(sym_op->m_dst)));
-
-	dst = sym_op->m_dst ?
-			rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
-					data_offset) :
-			rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+	/* In-place */
+	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
+		dst = src;
+	/* Out-of-place */
+	else {
+		oop = 1;
+		/* Segmented destination buffer is not supported if operation is
+		 * Out-of-place */
+		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
+		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
 					data_offset);
-
-	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+	}
 
 	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 				session->iv.offset);
@@ -254,12 +286,15 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		total_len = data_length - part_len;
 
 		while (total_len) {
-			dst += part_len;
 			m_src = m_src->next;
 
 			RTE_ASSERT(m_src != NULL);
 
 			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
@@ -274,7 +309,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		else
 			tag = sym_op->aead.digest.data;
 
-		qp->ops[session->key].finalize(&session->gdata_key,
+		qp->ops[session->key].finalize_enc(&session->gdata_key,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
@@ -291,12 +326,15 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		total_len = data_length - part_len;
 
 		while (total_len) {
-			dst += part_len;
 			m_src = m_src->next;
 
 			RTE_ASSERT(m_src != NULL);
 
 			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
@@ -308,7 +346,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		}
 
 		tag = qp->temp_digest;
-		qp->ops[session->key].finalize(&session->gdata_key,
+		qp->ops[session->key].finalize_dec(&session->gdata_key,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
@@ -322,7 +360,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 			tag = qp->temp_digest;
 		else
 			tag = sym_op->auth.digest.data;
-		qp->ops[session->key].finalize(&session->gdata_key,
+		qp->ops[session->key].finalize_enc(&session->gdata_key,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
@@ -338,7 +376,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		 * the bytes passed.
 		 */
 		tag = qp->temp_digest;
-		qp->ops[session->key].finalize(&session->gdata_key,
+		qp->ops[session->key].finalize_enc(&session->gdata_key,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
@@ -347,6 +385,191 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 	return 0;
 }
 
+static inline void
+aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
+{
+	uint32_t i;
+
+	for (i = 0; i < vec->num; i++)
+		vec->status[i] = errnum;
+}
+
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
+	if (s->req_digest_length != s->gen_digest_length) {
+		uint8_t tmpdigest[s->gen_digest_length];
+
+		s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+				s->gen_digest_length);
+		memcpy(digest, tmpdigest, s->req_digest_length);
+	} else {
+		s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
+				s->gen_digest_length);
+	}
+
+	return 0;
+}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
+	uint8_t tmpdigest[s->gen_digest_length];
+
+	s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+			s->gen_digest_length);
+
+	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
+			EBADMSG;
+}
+
+static inline void
+aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
+		void *iv, void *aad)
+{
+	uint32_t i;
+
+	/* init crypto operation */
+	s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
+			(uint64_t)s->aad_length);
+
+	/* update with sgl data */
+	for (i = 0; i < sgl->num; i++) {
+		struct rte_crypto_vec *vec = &sgl->vec[i];
+
+		s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
+				vec->len);
+	}
+}
+
+static inline void
+aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
+		void *iv)
+{
+	s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
+			sgl->vec[0].len);
+}
+
+static inline uint32_t
+aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
+				&vec->sgl[i], vec->iv[i], vec->aad[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
+				gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
+				&vec->sgl[i], vec->iv[i], vec->aad[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
+				gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
+				&vec->sgl[i], vec->iv[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
+				gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
+		struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
+				&vec->sgl[i], vec->iv[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
+				gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+/** Process CPU crypto bulk operations */
+uint32_t
+aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
+	struct rte_cryptodev_sym_session *sess,
+	__rte_unused union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_sym_vec *vec)
+{
+	void *sess_priv;
+	struct aesni_gcm_session *s;
+	struct gcm_context_data gdata_ctx;
+
+	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+	if (unlikely(sess_priv == NULL)) {
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+
+	s = sess_priv;
+	switch (s->op) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
+	case AESNI_GMAC_OP_GENERATE:
+		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
+	case AESNI_GMAC_OP_VERIFY:
+		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
+	default:
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+}
+
 /**
  * Process a completed job and return rte_mbuf which job processed
  *
@@ -418,8 +641,9 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
 	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
 		memset(sess, 0, sizeof(struct aesni_gcm_session));
 		memset(op->sym->session, 0,
-			rte_cryptodev_sym_get_header_session_size());
-		rte_mempool_put(qp->sess_mp, sess);
+			rte_cryptodev_sym_get_existing_header_session_size(
+				op->sym->session));
+		rte_mempool_put(qp->sess_mp_priv, sess);
 		rte_mempool_put(qp->sess_mp, op->sym->session);
 		op->sym->session = NULL;
 	}
@@ -487,12 +711,8 @@ aesni_gcm_create(const char *name,
 	struct rte_cryptodev *dev;
 	struct aesni_gcm_private *internals;
 	enum aesni_gcm_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
 
-	/* Check CPU for support for AES instruction set */
-	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
-		AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU");
-		return -EFAULT;
-	}
 	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
 	if (dev == NULL) {
 		AESNI_GCM_LOG(ERR, "driver %s: create failed",
@@ -501,7 +721,9 @@ aesni_gcm_create(const char *name,
 	}
 
 	/* Check CPU for supported vector instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		vector_mode = RTE_AESNI_GCM_AVX512;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
 		vector_mode = RTE_AESNI_GCM_AVX2;
 	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
 		vector_mode = RTE_AESNI_GCM_AVX;
@@ -517,27 +739,76 @@ aesni_gcm_create(const char *name,
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_CPU_AESNI |
+			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+
+	/* Check CPU for support for AES instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
+	else
+		AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");
+
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL)
+		return -ENOMEM;
 
 	switch (vector_mode) {
 	case RTE_AESNI_GCM_SSE:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX2:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(mb_mgr);
 		break;
-	default:
+	case RTE_AESNI_GCM_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(mb_mgr);
 		break;
+	default:
+		AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		goto error_exit;
 	}
 
 	internals = dev->data->dev_private;
 
 	internals->vector_mode = vector_mode;
+	internals->mb_mgr = mb_mgr;
+
+	/* Set arch independent function pointers, based on key size */
+	internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
+	internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
+	internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
+	internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
+	internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
+	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
+	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
+	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+
+	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
+	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
+	internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
+	internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
+	internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
+	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
+	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
+	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+
+	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
+	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
+	internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
+	internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
+	internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
+	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
+	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
+	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
@@ -549,6 +820,14 @@ aesni_gcm_create(const char *name,
 #endif
 
 	return 0;
+
+error_exit:
+	if (mb_mgr)
+		free_mb_mgr(mb_mgr);
+
+	rte_cryptodev_pmd_destroy(dev);
+
+	return -1;
 }
 
 static int
@@ -576,6 +855,7 @@ static int
 aesni_gcm_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_cryptodev *cryptodev;
+	struct aesni_gcm_private *internals;
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
@@ -586,6 +866,10 @@ aesni_gcm_remove(struct rte_vdev_device *vdev)
 	if (cryptodev == NULL)
 		return -ENODEV;
 
+	internals = cryptodev->data->dev_private;
+
+	free_mb_mgr(internals->mb_mgr);
+
 	return rte_cryptodev_pmd_destroy(cryptodev);
 }
 
@@ -603,9 +887,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
 	"socket_id=<int>");
 RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
 		cryptodev_driver_id);
-
-
-RTE_INIT(aesni_gcm_init_log)
-{
-	aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
-}
+RTE_LOG_REGISTER(aesni_gcm_logtype_driver, pmd.crypto.aesni_gcm, NOTICE);
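
Usage sketch (not part of the patch): the aesni_gcm_pmd_cpu_crypto_process() handler added above is reached through the public rte_cryptodev_sym_cpu_crypto_process() API (experimental since DPDK 20.02), which dispatches to the PMD's sym_cpu_process op without any enqueue/dequeue. This is a minimal sketch, assuming a cryptodev created by this PMD (dev_id) and a fully configured AES-GCM session (sess) already exist; gcm_cpu_encrypt_one and the buf/iv/aad/tag parameters are illustrative names, not part of the driver.

#include <errno.h>
#include <rte_cryptodev.h>

/* Sketch: run one contiguous AES-GCM encrypt in place through the
 * synchronous CPU crypto path. Returns 0 on success, a negative errno
 * otherwise. */
static int
gcm_cpu_encrypt_one(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
		void *buf, size_t buf_len, void *iv, void *aad, void *tag)
{
	struct rte_crypto_vec seg = {
		.base = buf,
		.len = buf_len,		/* single contiguous segment */
	};
	struct rte_crypto_sgl sgl = {
		.vec = &seg,
		.num = 1,
	};
	int32_t status = EINVAL;	/* overwritten per op by the PMD */
	struct rte_crypto_sym_vec vec = {
		.num = 1,		/* one operation in the burst */
		.sgl = &sgl,
		.iv = &iv,
		.aad = &aad,
		.digest = &tag,
		.status = &status,
	};
	union rte_crypto_sym_ofs ofs = { .raw = 0 };	/* ignored by this PMD */

	/* Returns the number of successfully processed operations;
	 * per-operation errors land in vec.status[] as positive errnos. */
	if (rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec) != 1)
		return -status;

	return 0;
}

The synchronous call above is exactly what RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO advertises: the buffers are processed on the calling lcore before the function returns, so no crypto queue pair is involved.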