X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_sym_session.c;h=23d059bf8436ccbfc291191d3af68788246a2cda;hb=23f627e0ed28;hp=d8b21bc2bdea0ffc4be9cd335e5a98c4f93b5b07;hpb=2aab3ff3d8cf65a503592e0fb83b718bd8ae22ce;p=dpdk.git

diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index d8b21bc2bd..23d059bf84 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -14,11 +14,49 @@
 #include <rte_log.h>
 #include <rte_malloc.h>
 #include <rte_crypto_sym.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security.h>
+#endif
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
 #include "qat_sym_pmd.h"
 
+/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha1InitialState[] = {
+	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
+	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};
+
+/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha224InitialState[] = {
+	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
+	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
+	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};
+
+/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha256InitialState[] = {
+	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
+	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
+	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};
+
+/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha384InitialState[] = {
+	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
+	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
+	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
+	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
+	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};
+
+/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha512InitialState[] = {
+	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
+	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
+	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
+	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
+	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
+
 /** Frees a context previously created
  * Depends on openssl libcrypto
  */
@@ -464,18 +502,23 @@ qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
 }
 
 static void
-qat_sym_session_handle_mixed(struct qat_sym_session *session)
+qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
+		struct qat_sym_session *session)
 {
+	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
+
 	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
 			session->qat_cipher_alg !=
 			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = QAT_GEN3;
+		session->min_qat_dev_gen = min_dev_gen;
 		qat_sym_session_set_ext_hash_flags(session,
 			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
 	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
 			session->qat_cipher_alg !=
 			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = QAT_GEN3;
+		session->min_qat_dev_gen = min_dev_gen;
 		qat_sym_session_set_ext_hash_flags(session,
 			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
 	} else if ((session->aes_cmac ||
@@ -484,7 +527,7 @@ qat_sym_session_handle_mixed(struct qat_sym_session *session)
 			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
 			session->qat_cipher_alg ==
 			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = QAT_GEN3;
+		session->min_qat_dev_gen = min_dev_gen;
 		qat_sym_session_set_ext_hash_flags(session, 0);
 	}
 }
@@ -497,8 +540,16 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	int ret;
 	int qat_cmd_id;
 
+	/* Verify the session physical address is known */
+	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
+	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
+		QAT_LOG(ERR,
+			"Session physical address unknown. Bad memory pool.");
+		return -EINVAL;
+	}
+
 	/* Set context descriptor physical address */
-	session->cd_paddr = rte_mempool_virt2iova(session) +
+	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
 	session->min_qat_dev_gen = QAT_GEN1;
@@ -537,7 +588,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 			if (ret < 0)
 				return ret;
 			/* Special handling of mixed hash+cipher algorithms */
-			qat_sym_session_handle_mixed(session);
+			qat_sym_session_handle_mixed(dev, session);
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -556,7 +607,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 			if (ret < 0)
 				return ret;
 			/* Special handling of mixed hash+cipher algorithms */
-			qat_sym_session_handle_mixed(session);
+			qat_sym_session_handle_mixed(dev, session);
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -581,69 +632,68 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 }
 
 static int
-qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
-		struct qat_sym_session *session,
+qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 		struct rte_crypto_aead_xform *aead_xform)
 {
-	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
-
-	if (qat_dev_gen == QAT_GEN3 &&
-			aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
-		/* Use faster Single-Pass GCM */
-		struct icp_qat_fw_la_cipher_req_params *cipher_param =
-				(void *) &session->fw_req.serv_specif_rqpars;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *) &session->fw_req.serv_specif_rqpars;
 
-		session->is_single_pass = 1;
-		session->min_qat_dev_gen = QAT_GEN3;
-		session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+	session->is_single_pass = 1;
+	session->min_qat_dev_gen = QAT_GEN3;
+	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-		session->cipher_iv.offset = aead_xform->iv.offset;
-		session->cipher_iv.length = aead_xform->iv.length;
-		if (qat_sym_session_aead_create_cd_cipher(session,
-				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
-		session->aad_len = aead_xform->aad_length;
-		session->digest_length = aead_xform->digest_length;
-		if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
-			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
-			session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
-			ICP_QAT_FW_LA_RET_AUTH_SET(
-				session->fw_req.comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_RET_AUTH_RES);
-		} else {
-			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-			session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
-			ICP_QAT_FW_LA_CMP_AUTH_SET(
-				session->fw_req.comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_CMP_AUTH_RES);
-		}
-		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-		ICP_QAT_FW_LA_PROTO_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
 		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		session->fw_req.comn_hdr.service_cmd_id =
-			ICP_QAT_FW_LA_CMD_CIPHER;
-		session->cd.cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				session->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				session->qat_dir);
-		QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
-				aead_xform->digest_length,
-				QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-				QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-		session->cd.cipher.cipher_config.reserved =
-				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
-					aead_xform->aad_length);
-		cipher_param->spc_aad_sz = aead_xform->aad_length;
-		cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	} else {
+		/* Chacha-Poly is special case that use QAT CTR mode */
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 	}
+	session->cipher_iv.offset = aead_xform->iv.offset;
+	session->cipher_iv.length = aead_xform->iv.length;
+	if (qat_sym_session_aead_create_cd_cipher(session,
+			aead_xform->key.data, aead_xform->key.length))
+		return -EINVAL;
+	session->aad_len = aead_xform->aad_length;
+	session->digest_length = aead_xform->digest_length;
+	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+		ICP_QAT_FW_LA_RET_AUTH_SET(
+			session->fw_req.comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_RET_AUTH_RES);
+	} else {
+		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+		ICP_QAT_FW_LA_CMP_AUTH_SET(
+			session->fw_req.comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_CMP_AUTH_RES);
+	}
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+		session->fw_req.comn_hdr.serv_specif_flags,
+		ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+		session->fw_req.comn_hdr.serv_specif_flags,
+		ICP_QAT_FW_LA_NO_PROTO);
+	session->fw_req.comn_hdr.service_cmd_id =
+			ICP_QAT_FW_LA_CMD_CIPHER;
+	session->cd.cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				session->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				session->qat_dir);
+	QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+			aead_xform->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	session->cd.cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+				aead_xform->aad_length);
+	cipher_param->spc_aad_sz = aead_xform->aad_length;
+	cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+
 	return 0;
 }
@@ -658,7 +708,31 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 	uint8_t key_length = auth_xform->key.length;
 	session->aes_cmac = 0;
+	session->auth_iv.offset = auth_xform->iv.offset;
+	session->auth_iv.length = auth_xform->iv.length;
+	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
 
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
+		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
 		break;
@@ -689,6 +763,8 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+		if (session->auth_iv.length == 0)
+			session->auth_iv.length = AES_GCM_J0_LEN;
 
 		break;
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
@@ -712,11 +788,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		}
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
 		break;
-	case RTE_CRYPTO_AUTH_SHA1:
-	case RTE_CRYPTO_AUTH_SHA256:
-	case RTE_CRYPTO_AUTH_SHA512:
-	case RTE_CRYPTO_AUTH_SHA224:
-	case RTE_CRYPTO_AUTH_SHA384:
 	case RTE_CRYPTO_AUTH_MD5:
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
 		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
@@ -728,9 +799,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		return -EINVAL;
 	}
 
-	session->auth_iv.offset = auth_xform->iv.offset;
-	session->auth_iv.length = auth_xform->iv.length;
-
 	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
 		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
 			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
@@ -796,6 +864,10 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 {
 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
 	enum rte_crypto_auth_operation crypto_operation;
+	struct qat_sym_dev_private *internals =
+			dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen =
+			internals->qat_dev->qat_dev_gen;
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -804,6 +876,9 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	session->cipher_iv.offset = xform->aead.iv.offset;
 	session->cipher_iv.length = xform->aead.iv.length;
 
+	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
+
+	session->is_single_pass = 0;
 	switch (aead_xform->algo) {
 	case RTE_CRYPTO_AEAD_AES_GCM:
 		if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -813,6 +888,14 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+		if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
+				QAT_AES_GCM_SPC_IV_SIZE) {
+			return qat_sym_session_handle_single_pass(session,
+					aead_xform);
+		}
+		if (session->cipher_iv.length == 0)
+			session->cipher_iv.length = AES_GCM_J0_LEN;
+
 		break;
 	case RTE_CRYPTO_AEAD_AES_CCM:
 		if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -823,23 +906,19 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
 		break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
+			return -EINVAL;
+		session->qat_cipher_alg =
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
+		return qat_sym_session_handle_single_pass(session,
+				aead_xform);
 	default:
 		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
 				aead_xform->algo);
 		return -EINVAL;
 	}
 
-	session->is_single_pass = 0;
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
-		/* Use faster Single-Pass GCM if possible */
-		int res = qat_sym_session_handle_single_pass(
-			dev->data->dev_private, session, aead_xform);
-		if (res < 0)
-			return res;
-		if (session->is_single_pass)
-			return 0;
-	}
-
 	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1601,7 +1680,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 			(struct icp_qat_fw_la_auth_req_params *)
 			((char *)&req_tmpl->serv_specif_rqpars +
 			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint16_t state1_size = 0, state2_size = 0;
+	uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
 	uint16_t hash_offset, cd_size;
 	uint32_t *aad_len = NULL;
 	uint32_t wordIndex = 0;
@@ -1651,10 +1730,11 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
 	hash->auth_config.reserved = 0;
 	hash->auth_config.config =
-			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
 				cdesc->qat_hash_alg, digestsize);
 
-	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+	if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
@@ -1677,6 +1757,15 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	 */
 	switch (cdesc->qat_hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-1 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
+					sizeof(sha1InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-1 HMAC */
 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
 			cdesc->aes_cmac)) {
@@ -1686,6 +1775,15 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-224 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
+					sizeof(sha224InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-224 HMAC */
 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
 			cdesc->aes_cmac)) {
@@ -1695,6 +1793,15 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-256 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
+					sizeof(sha256InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-256 HMAC */
 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
 			cdesc->aes_cmac)) {
@@ -1704,6 +1811,15 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-384 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
+					sizeof(sha384InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-384 HMAC */
 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
 			cdesc->aes_cmac)) {
@@ -1713,6 +1829,15 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
+			/* Plain SHA-512 */
+			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
+					sizeof(sha512InitialState));
+			state1_size = qat_hash_get_state1_size(
+					cdesc->qat_hash_alg);
+			break;
+		}
+		/* SHA-512 HMAC */
 		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
 			authkeylen, cdesc->cd_cur_ptr, &state1_size,
 			cdesc->aes_cmac)) {
@@ -1777,7 +1902,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		memcpy(cipherconfig->key, authkey, authkeylen);
 		memset(cipherconfig->key + authkeylen, 0,
 				ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
-		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+		cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
 				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
 		break;
@@ -1793,8 +1918,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
-		cdesc->cd_cur_ptr += state1_size + state2_size
-			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->min_qat_dev_gen = QAT_GEN2;
 
@@ -1880,7 +2004,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
 					>> 3);
 
-	cdesc->cd_cur_ptr += state1_size + state2_size;
+	cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
 	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
 
 	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
@@ -1984,3 +2108,151 @@ int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	}
 	return 0;
 }
+
+#ifdef RTE_LIB_SECURITY
+static int
+qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
+{
+	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
+	struct rte_security_docsis_xform *docsis = &conf->docsis;
+
+	/* CRC generate -> Cipher encrypt */
+	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
+
+		if (crypto_sym != NULL &&
+		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+		    crypto_sym->cipher.algo ==
+					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+		    (crypto_sym->cipher.key.length ==
+					ICP_QAT_HW_AES_128_KEY_SZ ||
+		     crypto_sym->cipher.key.length ==
+					ICP_QAT_HW_AES_256_KEY_SZ) &&
+		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
+		    crypto_sym->next == NULL) {
+			return 0;
+		}
+	/* Cipher decrypt -> CRC verify */
+	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
+
+		if (crypto_sym != NULL &&
+		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+		    crypto_sym->cipher.algo ==
+					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+		    (crypto_sym->cipher.key.length ==
+					ICP_QAT_HW_AES_128_KEY_SZ ||
+		     crypto_sym->cipher.key.length ==
+					ICP_QAT_HW_AES_256_KEY_SZ) &&
+		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
+		    crypto_sym->next == NULL) {
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int
+qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
+		struct rte_security_session_conf *conf, void *session_private)
+{
+	int ret;
+	int qat_cmd_id;
+	struct rte_crypto_sym_xform *xform = NULL;
+	struct qat_sym_session *session = session_private;
+
+	/* Clear the session */
+	memset(session, 0, qat_sym_session_get_private_size(dev));
+
+	ret = qat_sec_session_check_docsis(conf);
+	if (ret) {
+		QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
+		return ret;
+	}
+
+	xform = conf->crypto_xform;
+
+	/* Verify the session physical address is known */
+	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
+	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
+		QAT_LOG(ERR,
+			"Session physical address unknown. Bad memory pool.");
+		return -EINVAL;
+	}
+
+	/* Set context descriptor physical address */
+	session->cd_paddr = session_paddr +
+			offsetof(struct qat_sym_session, cd);
+
+	session->min_qat_dev_gen = QAT_GEN1;
+
+	/* Get requested QAT command id - should be cipher */
+	qat_cmd_id = qat_get_cmd_id(xform);
+	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
+		QAT_LOG(ERR, "Unsupported xform chain requested");
+		return -ENOTSUP;
+	}
+	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+
+	ret = qat_sym_session_configure_cipher(dev, xform, session);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+int
+qat_security_session_create(void *dev,
+		struct rte_security_session_conf *conf,
+		struct rte_security_session *sess,
+		struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	int ret;
+
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
+			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
+		QAT_LOG(ERR, "Invalid security protocol");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR, "Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
+			sess_private_data);
+	if (ret != 0) {
+		QAT_LOG(ERR, "Failed to configure session parameters");
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sec_session_private_data(sess, sess_private_data);
+
+	return ret;
+}
+
+int
+qat_security_session_destroy(void *dev __rte_unused,
+		struct rte_security_session *sess)
+{
+	void *sess_priv = get_sec_session_private_data(sess);
+	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+	if (sess_priv) {
+		if (s->bpi_ctx)
+			bpi_cipher_ctx_free(s->bpi_ctx);
+		memset(s, 0, qat_sym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		set_sec_session_private_data(sess, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+	return 0;
+}
+#endif
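
For reference, the following is a minimal application-side sketch (not part of the patch) of the kind of rte_security session configuration that the new qat_sec_session_check_docsis() path accepts: a DOCSIS downlink session whose crypto transform is a single AES-DOCSISBPI encrypt xform with a 16- or 32-byte key and a 16-byte IV. The device id, key buffer, IV placement and helper name are illustrative placeholders, and the exact rte_security_session_create() prototype differs between DPDK releases, so treat this only as a sketch of how the checked fields fit together.

/* Illustrative only - not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_security.h>

static int
docsis_downlink_session_conf_example(uint8_t cryptodev_id,	/* placeholder */
		uint8_t *key16,					/* 16-byte key */
		struct rte_security_session_conf *conf)
{
	static struct rte_crypto_sym_xform cipher_xform;

	/* Lookaside-protocol security context exposed by the crypto PMD */
	void *sec_ctx = rte_cryptodev_get_sec_ctx(cryptodev_id);
	if (sec_ctx == NULL)
		return -ENOTSUP;

	memset(&cipher_xform, 0, sizeof(cipher_xform));
	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.cipher.key.data = key16;
	cipher_xform.cipher.key.length = 16;	/* AES-128 key size */
	/* IV assumed to be placed right after the sym op in the crypto op */
	cipher_xform.cipher.iv.offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	cipher_xform.cipher.iv.length = 16;	/* AES block size */
	cipher_xform.next = NULL;		/* must be a single xform */

	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_DOCSIS;
	conf->docsis.direction = RTE_SECURITY_DOCSIS_DOWNLINK;
	conf->crypto_xform = &cipher_xform;

	/* The session itself would then be created with
	 * rte_security_session_create() on sec_ctx; its argument list
	 * (single or split session mempools) depends on the DPDK release.
	 */
	return 0;
}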