X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Faesni_gcm%2Faesni_gcm_pmd.c;h=83e544809e0109c0b66c3569affd65d6401e4c53;hb=a0a344a8f728e052425d7b2ffa08158c6710ae2f;hp=99031822b7e266a1f71ca28f5f702a672887da85;hpb=b79e4c00af0e7cfb8601ab0208659d226b82bd10;p=dpdk.git

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 99031822b7..83e544809e 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -1,79 +1,24 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include
-#include
 #include
 #include
 #include
-#include
-#include
+#include
 #include
 #include
 #include
 
 #include "aesni_gcm_pmd_private.h"
 
-/** GCM encode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_enc[] = {
-		[AESNI_GCM_KEY_128] = {
-				aesni_gcm128_init,
-				aesni_gcm128_enc_update,
-				aesni_gcm128_enc_finalize
-		},
-		[AESNI_GCM_KEY_256] = {
-				aesni_gcm256_init,
-				aesni_gcm256_enc_update,
-				aesni_gcm256_enc_finalize
-		}
-};
-
-/** GCM decode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_dec[] = {
-		[AESNI_GCM_KEY_128] = {
-				aesni_gcm128_init,
-				aesni_gcm128_dec_update,
-				aesni_gcm128_dec_finalize
-		},
-		[AESNI_GCM_KEY_256] = {
-				aesni_gcm256_init,
-				aesni_gcm256_dec_update,
-				aesni_gcm256_dec_finalize
-		}
-};
+static uint8_t cryptodev_driver_id;
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_sym_xform *auth_xform;
@@ -88,7 +33,7 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
 			GCM_LOG_ERR("Only AES GMAC is supported as an "
 					"authentication only algorithm");
-			return -EINVAL;
+			return -ENOTSUP;
 		}
 		/* Set IV parameters */
 		sess->iv.offset = auth_xform->auth.iv.offset;
@@ -111,7 +56,7 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
 			GCM_LOG_ERR("The only combined operation "
 					"supported is AES GCM");
-			return -EINVAL;
+			return -ENOTSUP;
 		}
 
 		/* Set IV parameters */
@@ -127,11 +72,11 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 		key_length = aead_xform->aead.key.length;
 		key = aead_xform->aead.key.data;
 
-		sess->aad_length = aead_xform->aead.add_auth_data_length;
+		sess->aad_length = aead_xform->aead.aad_length;
 		digest_length = aead_xform->aead.digest_length;
 	} else {
 		GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
-		return -EINVAL;
+		return -ENOTSUP;
 	}
 
@@ -145,20 +90,21 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 	/* Check key length and calculate GCM pre-compute. */
 	switch (key_length) {
 	case 16:
-		aesni_gcm128_pre(key, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_128;
-
+		break;
+	case 24:
+		sess->key = AESNI_GCM_KEY_192;
 		break;
 	case 32:
-		aesni_gcm256_pre(key, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_256;
-
 		break;
 	default:
-		GCM_LOG_ERR("Unsupported key length");
+		GCM_LOG_ERR("Invalid key length");
 		return -EINVAL;
 	}
 
+	gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
 	/* Digest check */
 	if (digest_length != 16 &&
 			digest_length != 12 &&
@@ -179,32 +125,43 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 	struct rte_crypto_sym_op *sym_op = op->sym;
 
 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		if (unlikely(sym_op->session->dev_type
-				!= RTE_CRYPTODEV_AESNI_GCM_PMD))
-			return sess;
-
-		sess = (struct aesni_gcm_session *)sym_op->session->_private;
+		if (likely(sym_op->session != NULL))
+			sess = (struct aesni_gcm_session *)
+					get_session_private_data(
+					sym_op->session,
+					cryptodev_driver_id);
 	} else {
 		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
 
-		if (rte_mempool_get(qp->sess_mp, &_sess))
-			return sess;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+			return NULL;
 
-		sess = (struct aesni_gcm_session *)
-			((struct rte_cryptodev_sym_session *)_sess)->_private;
+		sess = (struct aesni_gcm_session *)_sess_private_data;
 
-		if (unlikely(aesni_gcm_set_session_parameters(sess,
-				sym_op->xform) != 0)) {
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+				sess, sym_op->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
 			sess = NULL;
 		}
+		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(sym_op->session, cryptodev_driver_id,
+			_sess_private_data);
 	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
 	return sess;
 }
 
 /**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
  *
  * @param	qp		queue pair
  * @param	op		symmetric crypto operation
@@ -214,7 +171,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
  *
  */
 static int
-process_gcm_crypto_op(struct rte_crypto_op *op,
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		struct aesni_gcm_session *session)
 {
 	uint8_t *src, *dst;
@@ -237,7 +194,7 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 
 	RTE_ASSERT(m_src != NULL);
 
-	while (offset >= m_src->data_len) {
+	while (offset >= m_src->data_len && data_length != 0) {
 		offset -= m_src->data_len;
 		m_src = m_src->next;
 
@@ -279,12 +236,14 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 
 	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
 
-		aesni_gcm_enc[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				sym_op->aead.aad.data,
 				(uint64_t)session->aad_length);
 
-		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+		qp->ops[session->key].update_enc(&session->gdata_key,
+				&qp->gdata_ctx, dst, src,
 				(uint64_t)part_len);
 		total_len = data_length - part_len;
 
@@ -298,31 +257,27 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
-			aesni_gcm_enc[session->key].update(&session->gdata,
-					dst, src,
+			qp->ops[session->key].update_enc(&session->gdata_key,
+					&qp->gdata_ctx, dst, src,
 					(uint64_t)part_len);
 			total_len -= part_len;
 		}
 
-		aesni_gcm_enc[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				sym_op->aead.digest.data,
 				(uint64_t)session->digest_length);
 	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
-		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
-				sym_op->m_dst : sym_op->m_src,
-				session->digest_length);
-
-		if (!auth_tag) {
-			GCM_LOG_ERR("auth_tag");
-			return -1;
-		}
+		uint8_t *auth_tag = qp->temp_digest;
 
-		aesni_gcm_dec[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				sym_op->aead.aad.data,
 				(uint64_t)session->aad_length);
 
-		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+		qp->ops[session->key].update_dec(&session->gdata_key,
+				&qp->gdata_ctx, dst, src,
 				(uint64_t)part_len);
 		total_len = data_length - part_len;
@@ -336,39 +291,38 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
-			aesni_gcm_dec[session->key].update(&session->gdata,
+			qp->ops[session->key].update_dec(&session->gdata_key,
+					&qp->gdata_ctx,
 					dst, src,
 					(uint64_t)part_len);
 			total_len -= part_len;
 		}
 
-		aesni_gcm_dec[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				auth_tag,
 				(uint64_t)session->digest_length);
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
-		aesni_gcm_enc[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				src,
 				(uint64_t)data_length);
-		aesni_gcm_enc[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				sym_op->auth.digest.data,
 				(uint64_t)session->digest_length);
 	} else { /* AESNI_GMAC_OP_VERIFY */
-		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
-				sym_op->m_dst : sym_op->m_src,
-				session->digest_length);
+		uint8_t *auth_tag = qp->temp_digest;
 
-		if (!auth_tag) {
-			GCM_LOG_ERR("auth_tag");
-			return -1;
-		}
-
-		aesni_gcm_dec[session->key].init(&session->gdata,
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
 				iv_ptr,
 				src,
 				(uint64_t)data_length);
 
-		aesni_gcm_dec[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				auth_tag,
 				(uint64_t)session->digest_length);
 	}
@@ -387,13 +341,10 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
  * - Returns NULL on invalid job
  */
 static void
-post_process_gcm_crypto_op(struct rte_crypto_op *op)
+post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
+		struct rte_crypto_op *op,
+		struct aesni_gcm_session *session)
 {
-	struct rte_mbuf *m = op->sym->m_dst ?
-			op->sym->m_dst : op->sym->m_src;
-
-	struct aesni_gcm_session *session =
-		(struct aesni_gcm_session *)op->sym->session->_private;
-
 	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
 	/* Verify digest if required */
@@ -401,8 +352,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
 			session->op == AESNI_GMAC_OP_VERIFY) {
 		uint8_t *digest;
 
-		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
-				m->data_len - session->digest_length);
+		uint8_t *tag = (uint8_t *)&qp->temp_digest;
 
 		if (session->op == AESNI_GMAC_OP_VERIFY)
 			digest = op->sym->auth.digest.data;
@@ -418,9 +368,6 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
 
 		if (memcmp(tag, digest, session->digest_length) != 0)
 			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
-		/* trim area used for digest from mbuf */
-		rte_pktmbuf_trim(m, session->digest_length);
 	}
 }
 
@@ -428,6 +375,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
  * Process a completed GCM request
  *
  * @param	qp		Queue Pair to process
+ * @param	op		Crypto operation
  * @param	job		JOB_AES_HMAC job
  *
  * @return
@@ -435,12 +383,17 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
  */
 static void
 handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
-		struct rte_crypto_op *op)
+		struct rte_crypto_op *op,
+		struct aesni_gcm_session *sess)
 {
-	post_process_gcm_crypto_op(op);
+	post_process_gcm_crypto_op(qp, op, sess);
 
 	/* Free session if a session-less crypto op */
 	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+		memset(op->sym->session, 0,
+				rte_cryptodev_get_header_session_size());
+		rte_mempool_put(qp->sess_mp, sess);
 		rte_mempool_put(qp->sess_mp, op->sym->session);
 		op->sym->session = NULL;
 	}
@@ -468,14 +421,14 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 			break;
 		}
 
-		retval = process_gcm_crypto_op(ops[i], sess);
+		retval = process_gcm_crypto_op(qp, ops[i], sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			qp->qp_stats.dequeue_err_count++;
 			break;
 		}
 
-		handle_completed_gcm_crypto_op(qp, ops[i]);
+		handle_completed_gcm_crypto_op(qp, ops[i], sess);
 	}
 
 	qp->qp_stats.dequeued_count += i;
@@ -503,14 +456,11 @@ static int aesni_gcm_remove(struct rte_vdev_device *vdev);
 static int
 aesni_gcm_create(const char *name,
 		struct rte_vdev_device *vdev,
-		struct rte_crypto_vdev_init_params *init_params)
+		struct rte_cryptodev_pmd_init_params *init_params)
 {
 	struct rte_cryptodev *dev;
 	struct aesni_gcm_private *internals;
-
-	if (init_params->name[0] == '\0')
-		snprintf(init_params->name, sizeof(init_params->name),
-				"%s", name);
+	enum aesni_gcm_vector_mode vector_mode;
 
 	/* Check CPU for support for AES instruction set */
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
@@ -518,15 +468,21 @@ aesni_gcm_create(const char *name,
 		return -EFAULT;
 	}
 
-	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
-			sizeof(struct aesni_gcm_private), init_params->socket_id,
-			vdev);
+	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
 	if (dev == NULL) {
-		GCM_LOG_ERR("failed to create cryptodev vdev");
-		goto init_error;
+		GCM_LOG_ERR("driver %s: create failed", init_params->name);
+		return -ENODEV;
 	}
 
-	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_GCM_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_GCM_AVX;
+	else
+		vector_mode = RTE_AESNI_GCM_SSE;
+
+	dev->driver_id = cryptodev_driver_id;
 	dev->dev_ops = rte_aesni_gcm_pmd_ops;
 
 	/* register rx/tx burst functions for data path */
@@ -538,28 +494,39 @@ aesni_gcm_create(const char *name,
 			RTE_CRYPTODEV_FF_CPU_AESNI |
 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
+	switch (vector_mode) {
+	case RTE_AESNI_GCM_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		break;
+	case RTE_AESNI_GCM_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		break;
+	case RTE_AESNI_GCM_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		break;
+	default:
+		break;
+	}
+
 	internals = dev->data->dev_private;
+	internals->vector_mode = vector_mode;
+
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 	internals->max_nb_sessions = init_params->max_nb_sessions;
 
 	return 0;
-
-init_error:
-	GCM_LOG_ERR("driver %s: create failed", init_params->name);
-
-	aesni_gcm_remove(vdev);
-	return -EFAULT;
 }
 
 static int
 aesni_gcm_probe(struct rte_vdev_device *vdev)
 {
-	struct rte_crypto_vdev_init_params init_params = {
-		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
-		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct aesni_gcm_private),
 		rte_socket_id(),
-		{0}
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
 	};
 	const char *name;
 	const char *input_args;
@@ -568,17 +535,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
 	if (name == NULL)
 		return -EINVAL;
 	input_args = rte_vdev_device_args(vdev);
-	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
-	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
-			init_params.socket_id);
-	if (init_params.name[0] != '\0')
-		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
-			init_params.name);
-	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
-			init_params.max_nb_queue_pairs);
-	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
-			init_params.max_nb_sessions);
+	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
 
 	return aesni_gcm_create(name, vdev, &init_params);
 }
@@ -586,16 +543,18 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
 static int
 aesni_gcm_remove(struct rte_vdev_device *vdev)
 {
+	struct rte_cryptodev *cryptodev;
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
 	if (name == NULL)
 		return -EINVAL;
 
-	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
-			name, rte_socket_id());
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-	return 0;
+	return rte_cryptodev_pmd_destroy(cryptodev);
 }
 
 static struct rte_vdev_driver aesni_gcm_pmd_drv = {
@@ -603,9 +562,13 @@ static struct rte_vdev_driver aesni_gcm_pmd_drv = {
 	.remove = aesni_gcm_remove
 };
 
+static struct cryptodev_driver aesni_gcm_crypto_drv;
+
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv,
+		cryptodev_driver_id);
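Usage sketch (illustrative only, not part of the patch): the session-parameter checks above accept an AEAD transform with RTE_CRYPTO_AEAD_AES_GCM, a 16-, 24- or 32-byte key (mapped to AESNI_GCM_KEY_128/192/256), a digest of 16, 12 or 8 bytes, and the AAD length taken from aead.aad_length; unsupported algorithms are now rejected with -ENOTSUP rather than -EINVAL. The sketch below shows such a transform built with the public rte_crypto_sym_xform fields used by this code. The helper name, key/IV/AAD/digest sizes and the IV offset are example choices, not values taken from the patch.

#include <string.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* Example IV placement inside the crypto op's private area; the PMD itself
 * only reads the offset the application configured in the transform. */
#define EXAMPLE_IV_OFFSET \
	(sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static struct rte_crypto_sym_xform
example_aes_gcm_xform(uint8_t *key, uint16_t key_len)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform.next = NULL;
	xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;	/* only AEAD algo accepted */
	xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform.aead.key.data = key;
	xform.aead.key.length = key_len;	/* 16, 24 or 32 bytes */
	xform.aead.iv.offset = EXAMPLE_IV_OFFSET;
	xform.aead.iv.length = 12;		/* 96-bit GCM IV */
	xform.aead.digest_length = 16;		/* 16, 12 or 8 accepted */
	xform.aead.aad_length = 16;		/* copied into sess->aad_length */

	return xform;
}

The same transform shape is what a session-less operation carries in sym_op->xform, which is the path exercised by the two rte_mempool_get() calls added to aesni_gcm_get_session(). The vdev arguments advertised at the end of the file (max_nb_queue_pairs, max_nb_sessions, socket_id) are the ones parsed by rte_cryptodev_pmd_parse_input_args() at probe time.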