X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Faesni_gcm%2Faesni_gcm_pmd.c;h=a2d10a5726c1063ab73c65f46c756f903eed29dd;hb=9c2a5775c028;hp=76dce9cc5174a56cef753c099072f48d67dd8b95;hpb=b2bb3597470c651d1e7d056d761690fc7c0106b8;p=dpdk.git

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 76dce9cc51..a2d10a5726 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -30,8 +30,6 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <openssl/aes.h>
-
 #include
 #include
 #include
@@ -40,136 +38,100 @@
 #include
 #include
 #include
+#include <rte_byteorder.h>
 
 #include "aesni_gcm_pmd_private.h"
 
-/**
- * Global static parameter used to create a unique name for each AES-NI multi
- * buffer crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
-	int ret;
-
-	if (name == NULL)
-		return -EINVAL;
-
-	ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD),
-			unique_name_id++);
-	if (ret < 0)
-		return ret;
-	return 0;
-}
-
-static int
-aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
-		uint8_t *aeskey, unsigned aeskey_length)
-{
-	uint8_t key[aeskey_length] __rte_aligned(16);
-	AES_KEY enc_key;
-
-	if (hsubkey_length % 16 != 0 && aeskey_length % 16 != 0)
-		return -EFAULT;
-
-	memcpy(key, aeskey, aeskey_length);
-
-	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
-		return -EFAULT;
-
-	AES_encrypt(hsubkey, hsubkey, &enc_key);
-
-	return 0;
-}
-
-/** Get xform chain order */
-static int
-aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
-{
-	/*
-	 * GCM only supports authenticated encryption or authenticated
-	 * decryption, all other options are invalid, so we must have exactly
-	 * 2 xform structs chained together
-	 */
-	if (xform->next == NULL || xform->next->next != NULL)
-		return -1;
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
-	}
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-	}
+/** GCM encode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_enc[] = {
+	[AESNI_GCM_KEY_128] = {
+			aesni_gcm128_init,
+			aesni_gcm128_enc_update,
+			aesni_gcm128_enc_finalize
+	},
+	[AESNI_GCM_KEY_256] = {
+			aesni_gcm256_init,
+			aesni_gcm256_enc_update,
+			aesni_gcm256_enc_finalize
+	}
+};
 
-	return -1;
-}
+/** GCM decode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_dec[] = {
+	[AESNI_GCM_KEY_128] = {
+			aesni_gcm128_init,
+			aesni_gcm128_dec_update,
+			aesni_gcm128_dec_finalize
+	},
+	[AESNI_GCM_KEY_256] = {
+			aesni_gcm256_init,
+			aesni_gcm256_dec_update,
+			aesni_gcm256_dec_finalize
+	}
+};
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
-		struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
-	const struct rte_crypto_sym_xform *auth_xform = NULL;
-	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform;
+	const struct rte_crypto_sym_xform *cipher_xform;
 
-	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
-
-	/* Select Crypto operation - hash then cipher / cipher then hash */
-	switch (aesni_gcm_get_mode(xform)) {
-	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	if (xform->next == NULL || xform->next->next != NULL) {
+		GCM_LOG_ERR("Two and only two chained xform required");
+		return -EINVAL;
+	}
 
-		cipher_xform = xform;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = xform->next;
-		break;
-	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-
+		cipher_xform = xform;
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		auth_xform = xform;
 		cipher_xform = xform->next;
-		break;
-	default:
-		GCM_LOG_ERR("Unsupported operation chain order parameter");
+	} else {
+		GCM_LOG_ERR("Cipher and auth xform required");
 		return -EINVAL;
 	}
 
-	/* We only support AES GCM */
-	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
-			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+	if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+			(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
+			auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
+		GCM_LOG_ERR("We only support AES GCM and AES GMAC");
 		return -EINVAL;
+	}
 
-	/* Select cipher direction */
-	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
-			cipher_xform->cipher.op !=
-					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
-		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
-				"(DECRYPT) specified are an invalid selection");
-		return -EINVAL;
-	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
-			cipher_xform->cipher.op !=
-					RTE_CRYPTO_CIPHER_OP_DECRYPT) {
-		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
-				"(ENCRYPT) specified are an invalid selection");
+	/* Select Crypto operation */
+	if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+	else {
+		GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+				" Decrypt/Verify are valid only");
 		return -EINVAL;
 	}
 
-	/* Expand GCM AES128 key */
-	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
-			sess->gdata.expanded_keys);
+	/* Check key length and calculate GCM pre-compute. */
+	switch (cipher_xform->cipher.key.length) {
+	case 16:
+		aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+		sess->key = AESNI_GCM_KEY_128;
 
-	/* Calculate hash sub key here */
-	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
-			cipher_xform->cipher.key.data,
-			cipher_xform->cipher.key.length);
+		break;
+	case 32:
+		aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+		sess->key = AESNI_GCM_KEY_256;
 
-	/* Calculate GCM pre-compute */
-	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+		break;
+	default:
+		GCM_LOG_ERR("Unsupported cipher key length");
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -193,10 +155,10 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
 		return sess;
 
 	sess = (struct aesni_gcm_session *)
-		((struct rte_cryptodev_session *)_sess)->_private;
+		((struct rte_cryptodev_sym_session *)_sess)->_private;
 
-	if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
-			sess, op->xform) != 0)) {
+	if (unlikely(aesni_gcm_set_session_parameters(sess,
+			op->xform) != 0)) {
 		rte_mempool_put(qp->sess_mp, _sess);
 		sess = NULL;
 	}
@@ -216,19 +178,45 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
  *
  */
 static int
-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct rte_crypto_sym_op *op,
 		struct aesni_gcm_session *session)
 {
 	uint8_t *src, *dst;
-	struct rte_mbuf *m = op->m_src;
+	struct rte_mbuf *m_src = op->m_src;
+	uint32_t offset = op->cipher.data.offset;
+	uint32_t part_len, total_len, data_len;
+
+	RTE_ASSERT(m_src != NULL);
+
+	while (offset >= m_src->data_len) {
+		offset -= m_src->data_len;
+		m_src = m_src->next;
+
+		RTE_ASSERT(m_src != NULL);
+	}
+
+	data_len = m_src->data_len - offset;
+	part_len = (data_len < op->cipher.data.length) ? data_len :
+			op->cipher.data.length;
+
+	/* Destination buffer is required when segmented source buffer */
+	RTE_ASSERT((part_len == op->cipher.data.length) ||
+			((part_len != op->cipher.data.length) &&
+					(op->m_dst != NULL)));
+	/* Segmented destination buffer is not supported */
+	RTE_ASSERT((op->m_dst == NULL) ||
+			((op->m_dst != NULL) &&
+					rte_pktmbuf_is_contiguous(op->m_dst)));
+
 
-	src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
 	dst = op->m_dst ?
 			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
 					op->cipher.data.offset) :
-			rte_pktmbuf_mtod_offset(m, uint8_t *,
+			rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
 					op->cipher.data.offset);
 
+	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
 	/* sanity checks */
 	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
 			op->cipher.iv.length != 0) {
 		GCM_LOG_ERR("iv");
@@ -241,51 +229,85 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
 	 * to set BE LSB to 1, driver expects that 16B is allocated
 	 */
 	if (op->cipher.iv.length == 12) {
-		op->cipher.iv.data[15] = 1;
-	}
-
-	if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
-			op->auth.aad.length != 0) {
-		GCM_LOG_ERR("iv");
-		return -1;
+		uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+		*iv_padd = rte_bswap32(1);
 	}
 
 	if (op->auth.digest.length != 16 &&
 			op->auth.digest.length != 12 &&
-			op->auth.digest.length != 8 &&
-			op->auth.digest.length != 0) {
-		GCM_LOG_ERR("iv");
+			op->auth.digest.length != 8) {
+		GCM_LOG_ERR("digest");
 		return -1;
 	}
 
 	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
-		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
-				(uint64_t)op->cipher.data.length,
+		aesni_gcm_enc[session->key].init(&session->gdata,
 				op->cipher.iv.data,
 				op->auth.aad.data,
-				(uint64_t)op->auth.aad.length,
+				(uint64_t)op->auth.aad.length);
+
+		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+				(uint64_t)part_len);
+		total_len = op->cipher.data.length - part_len;
+
+		while (total_len) {
+			dst += part_len;
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			aesni_gcm_enc[session->key].update(&session->gdata,
+					dst, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		aesni_gcm_enc[session->key].finalize(&session->gdata,
 				op->auth.digest.data,
 				(uint64_t)op->auth.digest.length);
-	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
-		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
+				op->m_dst : op->m_src,
 				op->auth.digest.length);
 
 		if (!auth_tag) {
-			GCM_LOG_ERR("iv");
+			GCM_LOG_ERR("auth_tag");
 			return -1;
 		}
 
-		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
-				(uint64_t)op->cipher.data.length,
+		aesni_gcm_dec[session->key].init(&session->gdata,
 				op->cipher.iv.data,
 				op->auth.aad.data,
-				(uint64_t)op->auth.aad.length,
+				(uint64_t)op->auth.aad.length);
+
+		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+				(uint64_t)part_len);
+		total_len = op->cipher.data.length - part_len;
+
+		while (total_len) {
+			dst += part_len;
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			aesni_gcm_dec[session->key].update(&session->gdata,
+					dst, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		aesni_gcm_dec[session->key].finalize(&session->gdata,
 				auth_tag,
 				(uint64_t)op->auth.digest.length);
-	} else {
-		GCM_LOG_ERR("iv");
-		return -1;
 	}
 
 	return 0;
@@ -375,7 +397,7 @@ aesni_gcm_pmd_enqueue_burst(void *queue_pair,
 			break;
 		}
 
-		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+		retval = process_gcm_crypto_op(ops[i]->sym, sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			qp->qp_stats.enqueue_err_count++;
@@ -407,13 +429,21 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 static int aesni_gcm_remove(const char *name);
 
 static int
-aesni_gcm_create(const char *name,
-		struct rte_crypto_vdev_init_params *init_params)
+aesni_gcm_create(struct rte_crypto_vdev_init_params *init_params)
 {
 	struct rte_cryptodev *dev;
-	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct aesni_gcm_private *internals;
-	enum aesni_gcm_vector_mode vector_mode;
+
+	if (init_params->name[0] == '\0') {
+		int ret = rte_cryptodev_pmd_create_dev_name(
+				init_params->name,
+				RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
+
+		if (ret < 0) {
+			GCM_LOG_ERR("failed to create unique name");
+			return ret;
+		}
+	}
 
 	/* Check CPU for support for AES instruction set */
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
@@ -421,27 +451,7 @@ aesni_gcm_create(const char *name,
 		return -EFAULT;
 	}
 
-	/* Check CPU for supported vector instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
-		vector_mode = RTE_AESNI_GCM_AVX2;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
-		vector_mode = RTE_AESNI_GCM_AVX;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
-		vector_mode = RTE_AESNI_GCM_SSE;
-	else {
-		GCM_LOG_ERR("Vector instructions are not supported by CPU");
-		return -EFAULT;
-	}
-
-	/* create a unique device name */
-	if (create_unique_device_name(crypto_dev_name,
-			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
-		GCM_LOG_ERR("failed to create unique cryptodev name");
-		return -EINVAL;
-	}
-
-
-	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+	dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
 			sizeof(struct aesni_gcm_private), init_params->socket_id);
 	if (dev == NULL) {
 		GCM_LOG_ERR("failed to create cryptodev vdev");
@@ -457,36 +467,20 @@ aesni_gcm_create(const char *name,
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_CPU_AESNI;
-
-	switch (vector_mode) {
-	case RTE_AESNI_GCM_SSE:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		break;
-	case RTE_AESNI_GCM_AVX:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		break;
-	case RTE_AESNI_GCM_AVX2:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		break;
-	default:
-		break;
-	}
+			RTE_CRYPTODEV_FF_CPU_AESNI |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
-	/* Set vector instructions mode supported */
 	internals = dev->data->dev_private;
 
-	internals->vector_mode = vector_mode;
-
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 	internals->max_nb_sessions = init_params->max_nb_sessions;
 
 	return 0;
 
 init_error:
-	GCM_LOG_ERR("driver %s: create failed", name);
+	GCM_LOG_ERR("driver %s: create failed", init_params->name);
 
-	aesni_gcm_remove(crypto_dev_name);
+	aesni_gcm_remove(init_params->name);
 	return -EFAULT;
 }
 
@@ -496,19 +490,23 @@ aesni_gcm_probe(const char *name, const char *input_args)
 	struct rte_crypto_vdev_init_params init_params = {
 		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
 		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
-		rte_socket_id()
+		rte_socket_id(),
+		{0}
 	};
 
 	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
 
 	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
 			init_params.socket_id);
+	if (init_params.name[0] != '\0')
+		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
+			init_params.name);
 	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
 			init_params.max_nb_queue_pairs);
 	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
 			init_params.max_nb_sessions);
 
-	return aesni_gcm_create(name, &init_params);
+	return aesni_gcm_create(&init_params);
 }
 
 static int
@@ -528,8 +526,9 @@ static struct rte_vdev_driver aesni_gcm_pmd_drv = {
 	.remove = aesni_gcm_remove
 };
 
-DRIVER_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
 	"max_nb_queue_pairs=<int> "
 	"max_nb_sessions=<int> "
 	"socket_id=<int>");
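
Editor's note: a minimal application-side sketch of the two-element xform chain
that aesni_gcm_set_session_parameters() above validates. It is not part of the
patch; it assumes the pre-17.08 rte_cryptodev symmetric API of this commit's
era, where AES-GCM is expressed as chained cipher + auth transforms, and the
helper name build_gcm_encrypt_chain() is hypothetical. Sizes mirror the
driver's checks: a 16- or 32-byte key and an 8-, 12- or 16-byte digest.

#include <string.h>
#include <rte_crypto_sym.h>

static uint8_t cipher_key[16];	/* 16 -> AESNI_GCM_KEY_128, 32 -> AESNI_GCM_KEY_256 */

static void
build_gcm_encrypt_chain(struct rte_crypto_sym_xform *cipher,
		struct rte_crypto_sym_xform *auth)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	/* Cipher xform first; ENCRYPT paired with auth GENERATE selects
	 * AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION in the session setup. */
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->next = auth;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
	cipher->cipher.key.data = cipher_key;
	cipher->cipher.key.length = sizeof(cipher_key);

	/* Auth xform second and last: the driver rejects anything other
	 * than exactly two chained xforms. */
	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->next = NULL;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
	auth->auth.digest_length = 16;	/* driver accepts 8, 12 or 16 */
}

For decryption the same chain is built with RTE_CRYPTO_CIPHER_OP_DECRYPT and
RTE_CRYPTO_AUTH_OP_VERIFY; any other cipher/auth pairing is rejected by the
session setup above.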