X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_sym_session.c;h=3727d564d8fa179b023023623e3e5919ad91cf9e;hb=75c348b06aa0aeb5a74819f995663d8b709a7c3e;hp=1d58220a93bd6fb0894064b3a07bb43b63ea4e52;hpb=e3d99884428f3c8ed1f773520900b9466baa3ad4;p=dpdk.git

diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 1d58220a93..3727d564d8 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -19,6 +19,41 @@
 #include "qat_sym_session.h"
 #include "qat_sym_pmd.h"
 
+/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha1InitialState[] = {
+	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
+	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};
+
+/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha224InitialState[] = {
+	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
+	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
+	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};
+
+/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha256InitialState[] = {
+	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
+	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
+	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};
+
+/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha384InitialState[] = {
+	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
+	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
+	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
+	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
+	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};
+
+/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha512InitialState[] = {
+	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
+	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
+	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
+	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
+	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
+
 /** Frees a context previously created
  * Depends on openssl libcrypto
  */
@@ -35,7 +70,7 @@ bpi_cipher_ctx_free(void *bpi_ctx)
 static int
 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
 		enum rte_crypto_cipher_operation direction __rte_unused,
-		uint8_t *key, void **ctx)
+		const uint8_t *key, uint16_t key_length, void **ctx)
 {
 	const EVP_CIPHER *algo = NULL;
 	int ret;
@@ -49,7 +84,10 @@ bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
 	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
 		algo = EVP_des_ecb();
 	else
-		algo = EVP_aes_128_ecb();
+		if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
+			algo = EVP_aes_128_ecb();
+		else
+			algo = EVP_aes_256_ecb();
 
 	/* IV will be ECB encrypted whether direction is encrypt or decrypt*/
 	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
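
The hunk above replaces the hardcoded AES-128 transform with a key-length dispatch, so AES-256 DOCSIS BPI sessions get the right ECB primitive. A minimal standalone sketch of that dispatch (plain OpenSSL, not PMD code; the helper name and the literal 16 are illustrative):

#include <stdint.h>
#include <openssl/evp.h>

/* Pick the ECB transform used to encrypt the IV for DOCSIS BPI
 * runt-block handling, mirroring the dispatch in the hunk above. */
static const EVP_CIPHER *
bpi_ecb_transform(int is_des, uint16_t key_length)
{
	if (is_des)
		return EVP_des_ecb();
	/* a 16-byte key selects AES-128-ECB, anything else AES-256-ECB */
	return key_length == 16 ? EVP_aes_128_ecb() : EVP_aes_256_ecb();
}

As the driver comment notes, the IV is ECB-encrypted in both directions, which is why EVP_EncryptInit_ex() alone is sufficient.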
@@ -242,7 +280,8 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
 		break;
 	case RTE_CRYPTO_CIPHER_NULL:
-		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		break;
 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
@@ -285,6 +324,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 				cipher_xform->algo,
 				cipher_xform->op,
 				cipher_xform->key.data,
+				cipher_xform->key.length,
 				&session->bpi_ctx);
 		if (ret != 0) {
 			QAT_LOG(ERR, "failed to create DES BPI ctx");
@@ -303,6 +343,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 				cipher_xform->algo,
 				cipher_xform->op,
 				cipher_xform->key.data,
+				cipher_xform->key.length,
 				&session->bpi_ctx);
 		if (ret != 0) {
 			QAT_LOG(ERR, "failed to create AES BPI ctx");
@@ -333,10 +374,23 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
 		break;
+	case RTE_CRYPTO_CIPHER_AES_XTS:
+		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
+			QAT_LOG(ERR, "AES-XTS-192 not supported");
+			ret = -EINVAL;
+			goto error_out;
+		}
+		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
+			&session->qat_cipher_alg) != 0) {
+			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
+			ret = -EINVAL;
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_F8:
-	case RTE_CRYPTO_CIPHER_AES_XTS:
 	case RTE_CRYPTO_CIPHER_ARC4:
 		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
 				cipher_xform->algo);
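
AES-XTS keys are two equal-length AES keys concatenated, which is why the new case above validates cipher_xform->key.length/2 and rejects the 192-bit half outright (QAT has no AES-XTS-192). The same rule as a standalone sketch (literal sizes stand in for the ICP_QAT_HW_AES_*_KEY_SZ macros):

#include <stdint.h>
#include <errno.h>

/* Validate an XTS key by the size of one of its two halves. */
static int
xts_key_len_ok(uint32_t xts_key_len)
{
	switch (xts_key_len / 2) {
	case 16:	/* AES-128-XTS: 32-byte key */
	case 32:	/* AES-256-XTS: 64-byte key */
		return 0;
	default:	/* 24 (AES-XTS-192) and everything else */
		return -EINVAL;
	}
}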
@@ -402,6 +456,79 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
+static void
+qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static void
+qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
+		struct qat_sym_session *session)
+{
+	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
+
+	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			session->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		session->min_qat_dev_gen = min_dev_gen;
+		qat_sym_session_set_ext_hash_flags(session,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			session->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		session->min_qat_dev_gen = min_dev_gen;
+		qat_sym_session_set_ext_hash_flags(session,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((session->aes_cmac ||
+			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(session->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			session->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		session->min_qat_dev_gen = min_dev_gen;
+		qat_sym_session_set_ext_hash_flags(session, 0);
+	}
+}
+
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -436,7 +563,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		break;
 	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-			ret = qat_sym_session_configure_aead(xform,
+			ret = qat_sym_session_configure_aead(dev, xform,
 					session);
 			if (ret < 0)
 				return ret;
@@ -449,11 +576,13 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
+			/* Special handling of mixed hash+cipher algorithms */
+			qat_sym_session_handle_mixed(dev, session);
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-			ret = qat_sym_session_configure_aead(xform,
+			ret = qat_sym_session_configure_aead(dev, xform,
 					session);
 			if (ret < 0)
 				return ret;
@@ -466,6 +595,8 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
+			/* Special handling of mixed hash+cipher algorithms */
+			qat_sym_session_handle_mixed(dev, session);
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
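
The mixed hash+cipher handling above boils down to a capability gate: a session pairing a wireless auth algorithm with a cipher from a different family (or CMAC/NULL auth over a SNOW 3G/ZUC cipher) needs GEN3 hardware unless the GEN2 device advertises QAT_SYM_CAP_MIXED_CRYPTO. A sketch of just that gate (the generation numbers stand in for the qat_device_gen enum):

#include <stdint.h>

/* Minimum device generation for a mixed-algorithm session. */
static int
mixed_session_min_gen(uint32_t internal_capabilities,
		uint32_t mixed_crypto_cap_bit)
{
	return (internal_capabilities & mixed_crypto_cap_bit) ? 2 : 3;
}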
@@ -489,6 +620,73 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	return 0;
 }
 
+static int
+qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
+		struct qat_sym_session *session,
+		struct rte_crypto_aead_xform *aead_xform)
+{
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+
+	if (qat_dev_gen == QAT_GEN3 &&
+			aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
+		/* Use faster Single-Pass GCM */
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+				(void *) &session->fw_req.serv_specif_rqpars;
+
+		session->is_single_pass = 1;
+		session->min_qat_dev_gen = QAT_GEN3;
+		session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
+		session->cipher_iv.offset = aead_xform->iv.offset;
+		session->cipher_iv.length = aead_xform->iv.length;
+		if (qat_sym_session_aead_create_cd_cipher(session,
+				aead_xform->key.data, aead_xform->key.length))
+			return -EINVAL;
+		session->aad_len = aead_xform->aad_length;
+		session->digest_length = aead_xform->digest_length;
+		if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+			ICP_QAT_FW_LA_RET_AUTH_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_RET_AUTH_RES);
+		} else {
+			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+			ICP_QAT_FW_LA_CMP_AUTH_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_CMP_AUTH_RES);
+		}
+		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+		ICP_QAT_FW_LA_PROTO_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		session->fw_req.comn_hdr.service_cmd_id =
+				ICP_QAT_FW_LA_CMD_CIPHER;
+		session->cd.cipher.cipher_config.val =
+				ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+					ICP_QAT_HW_CIPHER_AEAD_MODE,
+					session->qat_cipher_alg,
+					ICP_QAT_HW_CIPHER_NO_CONVERT,
+					session->qat_dir);
+		QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+				aead_xform->digest_length,
+				QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+				QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+		session->cd.cipher.cipher_config.reserved =
+				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+					aead_xform->aad_length);
+		cipher_param->spc_aad_sz = aead_xform->aad_length;
+		cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+	}
+	return 0;
+}
+
 int
 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 				struct rte_crypto_sym_xform *xform,
@@ -496,10 +694,35 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 {
 	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
 	struct qat_sym_dev_private *internals = dev->data->dev_private;
-	uint8_t *key_data = auth_xform->key.data;
+	const uint8_t *key_data = auth_xform->key.data;
 	uint8_t key_length = auth_xform->key.length;
+	session->aes_cmac = 0;
+
+	session->auth_iv.offset = auth_xform->iv.offset;
+	session->auth_iv.length = auth_xform->iv.length;
+	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
 
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
 		break;
@@ -518,6 +741,10 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
 		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+		session->aes_cmac = 1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_GMAC:
 		if (qat_sym_validate_aes_key(auth_xform->key.length,
 					&session->qat_cipher_alg) != 0) {
@@ -526,6 +753,8 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+		if (session->auth_iv.length == 0)
+			session->auth_iv.length = AES_GCM_J0_LEN;
 		break;
 
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
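
The AES_GCM_J0_LEN fallback above (mirrored for the AEAD path further down) lets an application pass a zero-length IV and supply GCM's full 16-byte pre-counter block J0 itself. For the usual 96-bit IV, NIST SP 800-38D defines J0 = IV || 0^31 || 1; a sketch:

#include <stdint.h>
#include <string.h>

/* Build the GCM pre-counter block J0 from a 96-bit IV. */
static void
gcm_j0_from_96bit_iv(const uint8_t iv[12], uint8_t j0[16])
{
	memcpy(j0, iv, 12);
	j0[12] = 0x00;
	j0[13] = 0x00;
	j0[14] = 0x00;
	j0[15] = 0x01;
}

For other IV lengths J0 is derived via GHASH of the padded IV, which is presumably what a caller precomputes before handing the 16-byte block through.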
@@ -549,13 +778,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		}
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
 		break;
-	case RTE_CRYPTO_AUTH_SHA1:
-	case RTE_CRYPTO_AUTH_SHA256:
-	case RTE_CRYPTO_AUTH_SHA512:
-	case RTE_CRYPTO_AUTH_SHA224:
-	case RTE_CRYPTO_AUTH_SHA384:
 	case RTE_CRYPTO_AUTH_MD5:
-	case RTE_CRYPTO_AUTH_AES_CMAC:
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
 		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
 				auth_xform->algo);
@@ -566,9 +789,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		return -EINVAL;
 	}
 
-	session->auth_iv.offset = auth_xform->iv.offset;
-	session->auth_iv.length = auth_xform->iv.length;
-
 	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
 		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
 			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
@@ -628,7 +848,8 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 }
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+				struct rte_crypto_sym_xform *xform,
 				struct qat_sym_session *session)
 {
 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
@@ -641,6 +862,8 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
 	session->cipher_iv.offset = xform->aead.iv.offset;
 	session->cipher_iv.length = xform->aead.iv.length;
 
+	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
+
 	switch (aead_xform->algo) {
 	case RTE_CRYPTO_AEAD_AES_GCM:
 		if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -650,6 +873,9 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+		if (session->cipher_iv.length == 0)
+			session->cipher_iv.length = AES_GCM_J0_LEN;
+
 		break;
 	case RTE_CRYPTO_AEAD_AES_CCM:
 		if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -666,6 +892,17 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
 		return -EINVAL;
 	}
 
+	session->is_single_pass = 0;
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		/* Use faster Single-Pass GCM if possible */
+		int res = qat_sym_session_handle_single_pass(
+				dev->data->dev_private, session, aead_xform);
+		if (res < 0)
+			return res;
+		if (session->is_single_pass)
+			return 0;
+	}
+
 	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -817,6 +1054,8 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 		return ICP_QAT_HW_SHA512_STATE1_SZ;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
 		return ICP_QAT_HW_MD5_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+		return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
 		/* return maximum digest size in this case */
 		return ICP_QAT_HW_SHA512_STATE1_SZ;
@@ -843,6 +1082,8 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 		return SHA512_CBLOCK;
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
 		return 16;
+	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+		return ICP_QAT_HW_AES_BLK_SZ;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
 		return MD5_CBLOCK;
 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
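
The precompute path reworked in the following hunks builds HMAC state in the textbook way: state1 and state2 hold one-block partial digests of (key XOR ipad) and (key XOR opad). A minimal sketch of the pad step (HMAC_IPAD_VALUE is 0x36, matching RFC 2104 and the driver's defines; keys longer than a block are hashed first by the caller):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c

/* Fill ipad/opad blocks from the key; the partial hash itself is one
 * run of the algorithm's compression function over each block. */
static void
hmac_prepare_pads(const uint8_t *key, size_t keylen, size_t block_size,
		uint8_t *ipad, uint8_t *opad)
{
	size_t i;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	memcpy(ipad, key, keylen);
	memcpy(opad, key, keylen);
	for (i = 0; i < block_size; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
}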
@@ -991,11 +1232,28 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 #define HMAC_OPAD_VALUE	0x5c
 #define HASH_XCBC_PRECOMP_KEY_NUM 3
 
+static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
+
+static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
+{
+	int i;
+
+	derived[0] = base[0] << 1;
+	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
+		derived[i] = base[i] << 1;
+		derived[i - 1] |= base[i] >> 7;
+	}
+
+	if (base[0] & 0x80)
+		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
+}
+
 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 			const uint8_t *auth_key,
 			uint16_t auth_keylen,
 			uint8_t *p_state_buf,
-			uint16_t *p_state_len)
+			uint16_t *p_state_len,
+			uint8_t aes_cmac)
 {
 	int block_size;
 	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
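
aes_cmac_key_derive() above is the GF(2^128) doubling from RFC 4493: shift the block left one bit and, if a bit carried out, XOR the last byte with Rb = 0x87 (QAT_AES_CMAC_CONST_RB). Applied twice, it turns L = AES_K(0^128) — the encryption of AES_CMAC_SEED in the next hunk — into the subkeys K1 and K2. A self-contained sketch (OpenSSL; the helper names are illustrative):

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

/* double(): left shift by one bit, XOR 0x87 into the low byte on carry. */
static void
gf128_double(const uint8_t in[16], uint8_t out[16])
{
	int i;

	for (i = 0; i < 15; i++)
		out[i] = (uint8_t)((in[i] << 1) | (in[i + 1] >> 7));
	out[15] = (uint8_t)(in[15] << 1);
	if (in[0] & 0x80)
		out[15] ^= 0x87;
}

/* RFC 4493 subkey generation: K1 = double(L), K2 = double(K1). */
static void
cmac_subkeys(const uint8_t key[16], uint8_t k1[16], uint8_t k2[16])
{
	AES_KEY enc;
	uint8_t zero[16] = {0}, l[16];

	AES_set_encrypt_key(key, 128, &enc);
	AES_encrypt(zero, l, &enc);	/* L = AES-K(0^128) */
	gf128_double(l, k1);
	gf128_double(k1, k2);
}

With RFC 4493's test key 2b7e1516 28aed2a6 abf71588 09cf4f3c this yields K1 = fbeed618 35713366 7c85e08f 7236a8de, a quick way to sanity-check the derivation.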
@@ -1003,47 +1261,91 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 	int i;
 
 	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
-		static uint8_t qat_aes_xcbc_key_seed[
-				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
-			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-		};
-		uint8_t *in = NULL;
-		uint8_t *out = p_state_buf;
-		int x;
-		AES_KEY enc_key;
+		/* CMAC */
+		if (aes_cmac) {
+			AES_KEY enc_key;
+			uint8_t *in = NULL;
+			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
+			uint8_t *k1, *k2;
 
-		in = rte_zmalloc("working mem for key",
-				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
-		if (in == NULL) {
-			QAT_LOG(ERR, "Failed to alloc memory");
-			return -ENOMEM;
-		}
+			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
+
+			in = rte_zmalloc("AES CMAC K1",
+					ICP_QAT_HW_AES_128_KEY_SZ, 16);
+
+			if (in == NULL) {
+				QAT_LOG(ERR, "Failed to alloc memory");
+				return -ENOMEM;
+			}
+
+			rte_memcpy(in, AES_CMAC_SEED,
+					ICP_QAT_HW_AES_128_KEY_SZ);
+			rte_memcpy(p_state_buf, auth_key, auth_keylen);
 
-		rte_memcpy(in, qat_aes_xcbc_key_seed,
-				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
-		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
 			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
 				&enc_key) != 0) {
-				rte_free(in -
-					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
-				memset(out -
-					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
-					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+				rte_free(in);
 				return -EFAULT;
 			}
-			AES_encrypt(in, out, &enc_key);
-			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
-			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+
+			AES_encrypt(in, k0, &enc_key);
+
+			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+			aes_cmac_key_derive(k0, k1);
+			aes_cmac_key_derive(k1, k2);
+
+			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
+			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+			rte_free(in);
+			return 0;
+		} else {
+			static uint8_t qat_aes_xcbc_key_seed[
+					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+			};
+
+			uint8_t *in = NULL;
+			uint8_t *out = p_state_buf;
+			int x;
+			AES_KEY enc_key;
+
+			in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+			if (in == NULL) {
+				QAT_LOG(ERR, "Failed to alloc memory");
+				return -ENOMEM;
+			}
+
+			rte_memcpy(in, qat_aes_xcbc_key_seed,
+					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+				if (AES_set_encrypt_key(auth_key,
+							auth_keylen << 3,
+							&enc_key) != 0) {
+					rte_free(in -
+					  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+					memset(out -
+					   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+					  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+					return -EFAULT;
+				}
+				AES_encrypt(in, out, &enc_key);
+				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+			}
+			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+			return 0;
 		}
-		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
-		rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
-		return 0;
+
 	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
 		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
 		uint8_t *in = NULL;
@@ -1074,8 +1376,8 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 	}
 
 	block_size = qat_hash_get_block_size(hash_alg);
-	if (block_size <= 0)
-		return -EFAULT;
+	if (block_size < 0)
+		return block_size;
 	/* init ipad and opad from key and xor with fixed values */
 	memset(ipad, 0, block_size);
 	memset(opad, 0, block_size);
@@ -1189,7 +1491,7 @@ qat_get_crypto_proto_flag(uint16_t flags)
 }
 
 int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
-						uint8_t *cipherkey,
+						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
 {
 	struct icp_qat_hw_cipher_algo_blk *cipher;
@@ -1344,7 +1646,7 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 }
 
 int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
-						uint8_t *authkey,
+						const uint8_t *authkey,
 						uint32_t authkeylen,
 						uint32_t aad_length,
 						uint32_t digestsize,
@@ -1361,7 +1663,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	struct icp_qat_fw_la_auth_req_params *auth_param =
 		(struct icp_qat_fw_la_auth_req_params *)
 		((char *)&req_tmpl->serv_specif_rqpars +
-		sizeof(struct icp_qat_fw_la_cipher_req_params));
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 	uint16_t state1_size = 0, state2_size = 0;
 	uint16_t hash_offset, cd_size;
 	uint32_t *aad_len = NULL;
@@ -1412,16 +1714,25 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
 	hash->auth_config.reserved = 0;
 	hash->auth_config.config =
-			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
 				cdesc->qat_hash_alg, digestsize);
 
-	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+	if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
-		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
+			)
 		hash->auth_counter.counter = 0;
-	else
-		hash->auth_counter.counter = rte_bswap32(
-				qat_hash_get_block_size(cdesc->qat_hash_alg));
+	else {
+		int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
+
+		if (block_size < 0)
+			return block_size;
+		hash->auth_counter.counter = rte_bswap32(block_size);
+	}
 
 	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
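
After the hunk above, the auth counter seed follows one rule: plain hashes (MODE0) and the stateless MACs (SNOW 3G, KASUMI, ZUC, XCBC/CBC-MAC, NULL) start at zero, while HMAC-style algorithms preload the hash block size byte-swapped for the firmware's big-endian layout — with the block-size lookup error now propagated instead of masked. Condensed:

#include <stdint.h>
#include <rte_byteorder.h>

/* Initial value for hash->auth_counter.counter, per the branch above. */
static uint32_t
auth_counter_seed(int mode0_or_stateless_mac, int block_size)
{
	return mode0_or_stateless_mac ? 0 :
			rte_bswap32((uint32_t)block_size);
}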
@@ -1430,40 +1741,90 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	 */
 	switch (cdesc->qat_hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
-			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-1 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
+					sizeof(sha1InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-1 HMAC */
+		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac)) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
 		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
-			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-224 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
+					sizeof(sha224InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-224 HMAC */
+		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac)) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
 		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
-			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-256 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
+					sizeof(sha256InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-256 HMAC */
+		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac)) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
 		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
-			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-384 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
+					sizeof(sha384InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-384 HMAC */
+		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac)) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
 		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
-			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-512 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
+					sizeof(sha512InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-512 HMAC */
+		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac)) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
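
The MODE0 branches above seed state1 with the algorithm's initial hash words — exactly what the shaNNNInitialState tables at the top of the patch contain, serialized big-endian from FIPS 180-2. For SHA-256 the table can be regenerated from the standard H0..H7 (sketch):

#include <stdint.h>

/* Serialize the FIPS 180-2 SHA-256 initial words big-endian; the output
 * matches sha256InitialState byte for byte. */
static void
sha256_init_state_bytes(uint8_t out[32])
{
	static const uint32_t h[8] = {
		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
	};
	int i;

	for (i = 0; i < 8; i++) {
		out[4 * i]     = (uint8_t)(h[i] >> 24);
		out[4 * i + 1] = (uint8_t)(h[i] >> 16);
		out[4 * i + 2] = (uint8_t)(h[i] >> 8);
		out[4 * i + 3] = (uint8_t)(h[i]);
	}
}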
@@ -1471,10 +1832,16 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
 		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+		if (cdesc->aes_cmac)
+			memset(cdesc->cd_cur_ptr, 0, state1_size);
 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
 			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
-			&state2_size)) {
-			QAT_LOG(ERR, "(XCBC)precompute failed");
+			&state2_size, cdesc->aes_cmac)) {
+			cdesc->aes_cmac ? QAT_LOG(ERR,
+					"(CMAC)precompute failed")
+					: QAT_LOG(ERR,
+					"(XCBC)precompute failed");
 			return -EFAULT;
 		}
 		break;
@@ -1482,9 +1849,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
 		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
-		if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
-			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
-			&state2_size)) {
+		if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
+			authkeylen, cdesc->cd_cur_ptr + state1_size,
+			&state2_size, cdesc->aes_cmac)) {
 			QAT_LOG(ERR, "(GCM)precompute failed");
 			return -EFAULT;
 		}
@@ -1542,9 +1909,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
-			authkey, authkeylen, cdesc->cd_cur_ptr,
-			&state1_size)) {
+		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac)) {
 			QAT_LOG(ERR, "(MD5)precompute failed");
 			return -EFAULT;
 		}
@@ -1656,6 +2023,9 @@ int qat_sym_validate_aes_docsisbpi_key(int key_len,
 	case ICP_QAT_HW_AES_128_KEY_SZ:
 		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
 		break;
+	case ICP_QAT_HW_AES_256_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+		break;
 	default:
 		return -EINVAL;
 	}
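
The final hunk widens qat_sym_validate_aes_docsisbpi_key() so the EVP_aes_256_ecb() path added in bpi_cipher_ctx_init() at the top of the patch is actually reachable. Equivalent standalone logic (sizes written out; the enum is a stand-in for the driver's cipher algo constants):

#include <errno.h>

enum aes_alg { AES128, AES256 };

/* DOCSIS BPI now accepts 128- and 256-bit AES keys. */
static int
validate_aes_docsisbpi_key(int key_len, enum aes_alg *alg)
{
	switch (key_len) {
	case 16:
		*alg = AES128;
		break;
	case 32:
		*alg = AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}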