X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_sym_session.c;h=8ca475ca8b37d3fef0696dd88609f88f32227da1;hb=581c39b1cb7d82d301a8ec6a16778bcf6ee11c7c;hp=717893c784e120dc9eaec83ef84634c622153941;hpb=faa57df0b458d325aa90bc8e0de67987e8acc095;p=dpdk.git

diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 717893c784..8ca475ca8b 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -14,7 +14,7 @@
 #include 
 #include 
 #include 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 #include 
 #endif
 
@@ -57,6 +57,29 @@ static const uint8_t sha512InitialState[] = {
 	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
 	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
 
+static int
+qat_sym_cd_cipher_set(struct qat_sym_session *cd,
+		const uint8_t *enckey,
+		uint32_t enckeylen);
+
+static int
+qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
+		const uint8_t *authkey,
+		uint32_t authkeylen,
+		uint32_t aad_length,
+		uint32_t digestsize,
+		unsigned int operation);
+static void
+qat_sym_session_init_common_hdr(struct qat_sym_session *session);
+
+/* Req/cd init functions */
+
+static void
+qat_sym_session_finalize(struct qat_sym_session *session)
+{
+	qat_sym_session_init_common_hdr(session);
+}
+
 /** Frees a context previously created
  * Depends on openssl libcrypto
  */
@@ -108,7 +131,7 @@ ctx_init_err:
 
 static int
 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
-		struct qat_sym_dev_private *internals)
+		struct qat_cryptodev_private *internals)
 {
 	int i = 0;
 	const struct rte_cryptodev_capabilities *capability;
@@ -129,7 +152,7 @@ qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
 
 static int
 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
-		struct qat_sym_dev_private *internals)
+		struct qat_cryptodev_private *internals)
 {
 	int i = 0;
 	const struct rte_cryptodev_capabilities *capability;
@@ -244,8 +267,10 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform,
 		struct qat_sym_session *session)
 {
-	struct qat_sym_dev_private *internals = dev->data->dev_private;
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	enum qat_device_gen qat_dev_gen =
+			internals->qat_dev->qat_dev_gen;
 	int ret;
 
 	/* Get cipher xform from crypto xform chain */
@@ -272,6 +297,8 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 			goto error_out;
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+		if (qat_dev_gen == QAT_GEN4)
+			session->is_ucs = 1;
 		break;
 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
 		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
@@ -411,7 +438,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 	else
 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
 
-	if (qat_sym_session_aead_create_cd_cipher(session,
+	if (qat_sym_cd_cipher_set(session,
 						cipher_xform->key.data,
 						cipher_xform->key.length)) {
 		ret = -EINVAL;
@@ -505,7 +532,8 @@ static void
 qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
 		struct qat_sym_session *session)
 {
-	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
 	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
 			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
 
@@ -537,8 +565,11 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
 {
 	struct qat_sym_session *session = session_private;
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
+	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -548,11 +579,14 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -EINVAL;
 	}
 
+	memset(session, 0, sizeof(*session));
 	/* Set context descriptor physical address */
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
 	session->min_qat_dev_gen = QAT_GEN1;
+	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
+	session->is_ucs = 0;
 
 	/* Get requested QAT command id */
 	qat_cmd_id = qat_get_cmd_id(xform);
@@ -571,6 +605,10 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		ret = qat_sym_session_configure_auth(dev, xform, session);
 		if (ret < 0)
 			return ret;
+		session->is_single_pass_gmac =
+				qat_dev_gen == QAT_GEN3 &&
+				xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
+				xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
 		break;
 	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
@@ -587,8 +625,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			/* Special handling of mixed hash+cipher algorithms */
-			qat_sym_session_handle_mixed(dev, session);
+			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -606,8 +643,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			/* Special handling of mixed hash+cipher algorithms */
-			qat_sym_session_handle_mixed(dev, session);
+			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -627,72 +663,41 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 				session->qat_cmd);
 		return -ENOTSUP;
 	}
+	qat_sym_session_finalize(session);
+	if (handle_mixed) {
+		/* Special handling of mixed hash+cipher algorithms */
+		qat_sym_session_handle_mixed(dev, session);
+	}
 
 	return 0;
 }
 
 static int
 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
-		struct rte_crypto_aead_xform *aead_xform)
+		const struct rte_crypto_aead_xform *aead_xform)
 {
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &session->fw_req.serv_specif_rqpars;
-
 	session->is_single_pass = 1;
+	session->is_auth = 1;
 	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+	/* Chacha-Poly is special case that use QAT CTR mode */
 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
 	} else {
-		/* Chacha-Poly is special case that use QAT CTR mode */
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 	}
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
-	if (qat_sym_session_aead_create_cd_cipher(session,
-			aead_xform->key.data, aead_xform->key.length))
-		return -EINVAL;
 	session->aad_len = aead_xform->aad_length;
 	session->digest_length = aead_xform->digest_length;
+
 	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
 		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
 		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
-		ICP_QAT_FW_LA_RET_AUTH_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_RET_AUTH_RES);
 	} else {
 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
 		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
-		ICP_QAT_FW_LA_CMP_AUTH_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_CMP_AUTH_RES);
 	}
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			session->fw_req.comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-	session->fw_req.comn_hdr.service_cmd_id =
-			ICP_QAT_FW_LA_CMD_CIPHER;
-	session->cd.cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				session->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				session->qat_dir);
-	QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
-			aead_xform->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	session->cd.cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
-				aead_xform->aad_length);
-	cipher_param->spc_aad_sz = aead_xform->aad_length;
-	cipher_param->spc_auth_res_sz = aead_xform->digest_length;
 
 	return 0;
 }
@@ -703,14 +708,19 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 				struct qat_sym_session *session)
 {
 	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
-	struct qat_sym_dev_private *internals = dev->data->dev_private;
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
 	const uint8_t *key_data = auth_xform->key.data;
 	uint8_t key_length = auth_xform->key.length;
-	session->aes_cmac = 0;
+	enum qat_device_gen qat_dev_gen =
+			internals->qat_dev->qat_dev_gen;
 
+	session->aes_cmac = 0;
+	session->auth_key_length = auth_xform->key.length;
 	session->auth_iv.offset = auth_xform->iv.offset;
 	session->auth_iv.length = auth_xform->iv.length;
 	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
+	session->is_auth = 1;
+	session->digest_length = auth_xform->digest_length;
 
 	switch (auth_xform->algo) {
 	case RTE_CRYPTO_AUTH_SHA1:
@@ -765,7 +775,12 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
 		if (session->auth_iv.length == 0)
 			session->auth_iv.length = AES_GCM_J0_LEN;
-
+		else
+			session->is_iv12B = 1;
+		if (qat_dev_gen == QAT_GEN4) {
+			session->is_cnt_zero = 1;
+			session->is_ucs = 1;
+		}
 		break;
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
@@ -800,6 +815,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 	}
 
 	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		session->is_gmac = 1;
 		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
 			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
@@ -807,13 +823,12 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 			 * It needs to create cipher desc content first,
 			 * then authentication
 			 */
-
-			if (qat_sym_session_aead_create_cd_cipher(session,
+			if (qat_sym_cd_cipher_set(session,
 						auth_xform->key.data,
 						auth_xform->key.length))
 				return -EINVAL;
 
-			if (qat_sym_session_aead_create_cd_auth(session,
+			if (qat_sym_cd_auth_set(session,
 						key_data,
 						key_length,
 						0,
@@ -828,7 +843,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 			 * then cipher
 			 */
 
-			if (qat_sym_session_aead_create_cd_auth(session,
+			if (qat_sym_cd_auth_set(session,
 						key_data,
 						key_length,
 						0,
@@ -836,15 +851,13 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 						auth_xform->op))
 				return -EINVAL;
 
-			if (qat_sym_session_aead_create_cd_cipher(session,
+			if (qat_sym_cd_cipher_set(session,
 						auth_xform->key.data,
 						auth_xform->key.length))
 				return -EINVAL;
 		}
-		/* Restore to authentication only only */
-		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
 	} else {
-		if (qat_sym_session_aead_create_cd_auth(session,
+		if (qat_sym_cd_auth_set(session,
 				key_data,
 				key_length,
 				0,
@@ -853,7 +866,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 			return -EINVAL;
 	}
 
-	session->digest_length = auth_xform->digest_length;
 	return 0;
 }
 
@@ -864,7 +876,7 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 {
 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
 	enum rte_crypto_auth_operation crypto_operation;
-	struct qat_sym_dev_private *internals =
+	struct qat_cryptodev_private *internals =
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
@@ -877,6 +889,8 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	session->cipher_iv.length = xform->aead.iv.length;
 
 	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
+	session->is_auth = 1;
+	session->digest_length = aead_xform->digest_length;
 
 	session->is_single_pass = 0;
 	switch (aead_xform->algo) {
@@ -888,14 +902,18 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
-		if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
-				QAT_AES_GCM_SPC_IV_SIZE) {
-			return qat_sym_session_handle_single_pass(session,
-					aead_xform);
-		}
-		if (session->cipher_iv.length == 0)
-			session->cipher_iv.length = AES_GCM_J0_LEN;
+		if (qat_dev_gen == QAT_GEN4)
+			session->is_ucs = 1;
+		if (session->cipher_iv.length == 0) {
+			session->cipher_iv.length = AES_GCM_J0_LEN;
+			break;
+		}
+		session->is_iv12B = 1;
+		if (qat_dev_gen < QAT_GEN3)
+			break;
+		qat_sym_session_handle_single_pass(session,
+				aead_xform);
 
 		break;
 	case RTE_CRYPTO_AEAD_AES_CCM:
 		if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -905,21 +923,30 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+		if (qat_dev_gen == QAT_GEN4)
+			session->is_ucs = 1;
 		break;
 	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
 		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
 			return -EINVAL;
+		if (qat_dev_gen == QAT_GEN4)
+			session->is_ucs = 1;
 		session->qat_cipher_alg =
 				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
-		return qat_sym_session_handle_single_pass(session,
+		qat_sym_session_handle_single_pass(session,
 				aead_xform);
+		break;
 	default:
 		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
 				aead_xform->algo);
 		return -EINVAL;
 	}
 
-	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+	if (session->is_single_pass) {
+		if (qat_sym_cd_cipher_set(session,
+			aead_xform->key.data, aead_xform->key.length))
+			return -EINVAL;
+	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
@@ -931,12 +958,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
 			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
 
-		if (qat_sym_session_aead_create_cd_cipher(session,
+		if (qat_sym_cd_cipher_set(session,
 					aead_xform->key.data,
 					aead_xform->key.length))
 			return -EINVAL;
 
-		if (qat_sym_session_aead_create_cd_auth(session,
+		if (qat_sym_cd_auth_set(session,
 					aead_xform->key.data,
 					aead_xform->key.length,
 					aead_xform->aad_length,
@@ -953,7 +980,7 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
 			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
 
-		if (qat_sym_session_aead_create_cd_auth(session,
+		if (qat_sym_cd_auth_set(session,
 					aead_xform->key.data,
 					aead_xform->key.length,
 					aead_xform->aad_length,
@@ -961,13 +988,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 					crypto_operation))
 			return -EINVAL;
 
-		if (qat_sym_session_aead_create_cd_cipher(session,
+		if (qat_sym_cd_cipher_set(session,
 					aead_xform->key.data,
 					aead_xform->key.length))
 			return -EINVAL;
 	}
 
-	session->digest_length = aead_xform->digest_length;
 	return 0;
 }
 
@@ -1190,6 +1216,9 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 	uint64_t *hash_state_out_be64;
 	int i;
 
+	/* Initialize to avoid gcc warning */
+	memset(digest, 0, sizeof(digest));
+
 	digest_size = qat_hash_get_digest_size(hash_alg);
 	if (digest_size <= 0)
 		return -EFAULT;
@@ -1439,12 +1468,17 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 }
 
 static void
-qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-		enum qat_sym_proto_flag proto_flags)
+qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 {
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
+	uint32_t slice_flags = session->slice_types;
+
 	header->hdr_flags =
 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
 	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->service_cmd_id = session->qat_cmd;
 	header->comn_req_flags =
 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
 					QAT_COMN_PTR_TYPE_FLAT);
@@ -1476,41 +1510,55 @@ qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
 		break;
 	}
 
-	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
-	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
-					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-}
+	/* More than one of the following flags can be set at once */
+	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
+		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			header->serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	}
+	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
+		ICP_QAT_FW_LA_SLICE_TYPE_SET(
+			header->serv_specif_flags,
+			ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+	}
 
-/*
- * Snow3G and ZUC should never use this function
- * and set its protocol flag in both cipher and auth part of content
- * descriptor building function
- */
-static enum qat_sym_proto_flag
-qat_get_crypto_proto_flag(uint16_t flags)
-{
-	int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
-	enum qat_sym_proto_flag qat_proto_flag =
-			QAT_CRYPTO_PROTO_FLAG_NONE;
+	if (session->is_auth) {
+		if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
+			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_CMP_AUTH_RES);
+		} else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
+			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_RET_AUTH_RES);
+			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+		}
+	} else {
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	}
 
-	switch (proto) {
-	case ICP_QAT_FW_LA_GCM_PROTO:
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
-		break;
-	case ICP_QAT_FW_LA_CCM_PROTO:
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
-		break;
+	if (session->is_iv12B) {
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			header->serv_specif_flags,
+			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
 	}
 
-	return qat_proto_flag;
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_UPDATE_STATE);
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
-int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
+int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
 {
 	struct icp_qat_hw_cipher_algo_blk *cipher;
+	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -1518,8 +1566,12 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
 	enum icp_qat_hw_cipher_convert key_convert;
-	enum qat_sym_proto_flag qat_proto_flag =
-		QAT_CRYPTO_PROTO_FLAG_NONE;
+	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
+			(struct icp_qat_fw_la_cipher_20_req_params *)
+			&cdesc->fw_req.serv_specif_rqpars;
+	struct icp_qat_fw_la_cipher_req_params *req_cipher =
+			(struct icp_qat_fw_la_cipher_req_params *)
+			&cdesc->fw_req.serv_specif_rqpars;
 	uint32_t total_key_size;
 	uint16_t cipher_offset, cd_size;
 	uint32_t wordIndex = 0;
@@ -1555,9 +1607,16 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
 		/*
 		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
-		 * Overriding default values previously set
+		 * Overriding default values previously set.
+		 * Chacha20-Poly1305 is special case, CTR but single-pass
+		 * so both direction need to be used.
 		 */
 		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		if (cdesc->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
+			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
+			cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+		}
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
 		cdesc->qat_cipher_alg ==
@@ -1565,6 +1624,8 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
 	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
 	else
 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
 
@@ -1573,7 +1634,7 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
 
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
 		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
@@ -1583,35 +1644,26 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
 		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
-		qat_proto_flag =
-			qat_get_crypto_proto_flag(header->serv_specif_flags);
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
 		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
-		qat_proto_flag =
-			qat_get_crypto_proto_flag(header->serv_specif_flags);
 	} else if (cdesc->qat_cipher_alg ==
 		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
 		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
 		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
-		qat_proto_flag =
-			qat_get_crypto_proto_flag(header->serv_specif_flags);
 	}
-	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
 	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
 	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
-	header->service_cmd_id = cdesc->qat_cmd;
-	qat_sym_session_init_common_hdr(header, qat_proto_flag);
-
 	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
 	cipher->cipher_config.val =
 	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
 					cdesc->qat_cipher_alg, key_convert,
 					cdesc->qat_dir);
@@ -1631,12 +1683,38 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 
 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
 					cipherkeylen + cipherkeylen;
+	} else if (cdesc->is_ucs) {
+		const uint8_t *final_key = cipherkey;
+
+		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
+		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
+			ICP_QAT_HW_AES_128_KEY_SZ);
+		cipher20->cipher_config.reserved[0] = 0;
+		cipher20->cipher_config.reserved[1] = 0;
+		cipher20->cipher_config.reserved[2] = 0;
+
+		rte_memcpy(cipher20->key, final_key, cipherkeylen);
+		cdesc->cd_cur_ptr +=
+			sizeof(struct icp_qat_hw_ucs_cipher_config) +
+			cipherkeylen;
 	} else {
 		memcpy(cipher->key, cipherkey, cipherkeylen);
 		cdesc->cd_cur_ptr +=
 			sizeof(struct icp_qat_hw_cipher_config) + cipherkeylen;
 	}
 
+	if (cdesc->is_single_pass) {
+		QAT_FIELD_SET(cipher->cipher_config.val,
+			cdesc->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
+		cdesc->cd.cipher.cipher_config.reserved =
+				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+					cdesc->aad_len);
+		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
+	}
+
 	if (total_key_size > cipherkeylen) {
 		uint32_t padding_size = total_key_size-cipherkeylen;
 		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
@@ -1655,13 +1733,28 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
 
 		cdesc->cd_cur_ptr += padding_size;
 	}
+	if (cdesc->is_ucs) {
+		/*
+		 * These values match in terms of position auth
+		 * slice request fields
+		 */
+		req_ucs->spc_auth_res_sz = cdesc->digest_length;
+		if (!cdesc->is_gmac) {
+			req_ucs->spc_aad_sz = cdesc->aad_len;
+			req_ucs->spc_aad_offset = 0;
+		}
+	} else if (cdesc->is_single_pass) {
+		req_cipher->spc_aad_sz = cdesc->aad_len;
+		req_cipher->spc_auth_res_sz = cdesc->digest_length;
+	}
 	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
 	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
 
 	return 0;
 }
 
-int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 						const uint8_t *authkey,
 						uint32_t authkeylen,
 						uint32_t aad_length,
@@ -1672,7 +1765,6 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
 	void *ptr = &req_tmpl->cd_ctrl;
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
@@ -1685,8 +1777,6 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	uint32_t *aad_len = NULL;
 	uint32_t wordIndex = 0;
 	uint32_t *pTempKey;
-	enum qat_sym_proto_flag qat_proto_flag =
-		QAT_CRYPTO_PROTO_FLAG_NONE;
 
 	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
 		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
@@ -1709,19 +1799,10 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		return -EFAULT;
 	}
 
-	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
-		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
-		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_CMP_AUTH_RES);
+	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
 		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
-	} else {
-		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_RET_AUTH_RES);
-		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	else
 		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
-	}
 
 	/*
 	 * Setup the inner hash config
@@ -1740,6 +1821,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 			|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
 			|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
 			|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
+			|| cdesc->is_cnt_zero
 			)
 		hash->auth_counter.counter = 0;
 	else {
@@ -1863,7 +1945,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
 		if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
 			authkeylen, cdesc->cd_cur_ptr + state1_size,
@@ -1886,7 +1968,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		cdesc->aad_len = aad_length;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
 		state1_size = qat_hash_get_state1_size(
 				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
 		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
@@ -1910,7 +1992,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		hash->auth_config.config =
 			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
 				cdesc->qat_hash_alg, digestsize);
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
 		state1_size = qat_hash_get_state1_size(
 				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
 		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
@@ -1938,7 +2020,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
 		state1_size = qat_hash_get_state1_size(
 				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
 		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
@@ -1986,10 +2068,6 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 		return -EFAULT;
 	}
 
-	/* Request template setup */
-	qat_sym_session_init_common_hdr(header, qat_proto_flag);
-	header->service_cmd_id = cdesc->qat_cmd;
-
 	/* Auth CD config setup */
 	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
@@ -2109,7 +2187,7 @@ int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static int
 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
 {
@@ -2162,6 +2240,9 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	struct rte_crypto_sym_xform *xform = NULL;
 	struct qat_sym_session *session = session_private;
 
+	/* Clear the session */
+	memset(session, 0, qat_sym_session_get_private_size(dev));
+
 	ret = qat_sec_session_check_docsis(conf);
 	if (ret) {
 		QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
@@ -2184,23 +2265,18 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 
 	session->min_qat_dev_gen = QAT_GEN1;
 
-	/* Get requested QAT command id */
+	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
-	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
 		QAT_LOG(ERR, "Unsupported xform chain requested");
 		return -ENOTSUP;
 	}
 	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
-	switch (session->qat_cmd) {
-	case ICP_QAT_FW_LA_CMD_CIPHER:
-		ret = qat_sym_session_configure_cipher(dev, xform, session);
-		if (ret < 0)
-			return ret;
-		break;
-	default:
-		QAT_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
-		return -ENOTSUP;
-	}
+
+	ret = qat_sym_session_configure_cipher(dev, xform, session);
+	if (ret < 0)
+		return ret;
+	qat_sym_session_finalize(session);
 
 	return 0;
 }
@@ -2215,16 +2291,17 @@ qat_security_session_create(void *dev,
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
 	int ret;
 
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
+			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
+		QAT_LOG(ERR, "Invalid security protocol");
+		return -EINVAL;
+	}
+
 	if (rte_mempool_get(mempool, &sess_private_data)) {
 		QAT_LOG(ERR, "Couldn't get object from session mempool");
 		return -ENOMEM;
 	}
 
-	if (conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
-		QAT_LOG(ERR, "Invalid security protocol");
-		return -EINVAL;
-	}
-
 	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
 			sess_private_data);
 	if (ret != 0) {