diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 2dc0614928..5ff4aa1e5a 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -12,6 +12,7 @@
 
 #include "qat_sym.h"
 
+
 /** Decrypt a single partial block
  * Depends on openssl libcrypto
  * Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -156,9 +157,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 	uint32_t auth_len = 0, auth_ofs = 0;
 	uint32_t min_ofs = 0;
 	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t digest_start = 0;
+	uint64_t auth_data_end = 0;
 	uint8_t do_sgl = 0;
 	uint8_t in_place = 1;
+	int alignment_adjustment = 0;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
 	struct qat_sym_op_cookie *cookie =
 				(struct qat_sym_op_cookie *)op_cookie;
@@ -194,7 +196,8 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 
 	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
 			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
@@ -465,6 +468,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					min_ofs);
 		}
 		dst_buf_start = src_buf_start;
+
+		/* remember any adjustment for later, note, can be +/- */
+		alignment_adjustment = src_buf_start -
+			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
 	}
 
 	if (do_cipher || do_aead) {
@@ -494,33 +501,37 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 			: (auth_param->auth_off + auth_param->auth_len);
 
 	if (do_auth && do_cipher) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		/* First find the end of the data */
 		if (do_sgl) {
 			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len;
+				auth_param->auth_len + alignment_adjustment;
 			struct rte_mbuf *sgl_buf = (in_place ?
-				op->sym->m_src : op->sym->m_dst);
-			while (remaining_off >= rte_pktmbuf_data_len(
-					sgl_buf)) {
-				remaining_off -= rte_pktmbuf_data_len(
-						sgl_buf);
+					op->sym->m_src : op->sym->m_dst);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
 				sgl_buf = sgl_buf->next;
 			}
-			digest_start = (uint64_t)rte_pktmbuf_iova_offset(
+
+			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
 				sgl_buf, remaining_off);
 		} else {
-			digest_start = (in_place ?
+			auth_data_end = (in_place ?
 				src_buf_start : dst_buf_start) +
 				auth_param->auth_off + auth_param->auth_len;
 		}
-		/* Handle cases of auth-gen-then-cipher and
-		 * cipher-decrypt-then-auth-verify with digest encrypted
-		 */
+		/* Then check if digest-encrypted conditions are met */
 		if ((auth_param->auth_off + auth_param->auth_len <
 			cipher_param->cipher_offset +
 			cipher_param->cipher_length) &&
 			(op->sym->auth.digest.phys_addr ==
-				digest_start)) {
+				auth_data_end)) {
 			/* Handle partial digest encryption */
 			if (cipher_param->cipher_offset +
 					cipher_param->cipher_length <
@@ -584,6 +595,13 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		qat_req->comn_mid.dest_data_addr = dst_buf_start;
 	}
 
+	/* Handle Single-Pass GCM */
+	if (ctx->is_single_pass) {
+		cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+		cipher_param->spc_auth_res_addr =
+			op->sym->aead.digest.phys_addr;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
 			sizeof(struct icp_qat_fw_la_bulk_req));
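
For context on the digest-encrypted handling above: the reworked SGL walk computes the IOVA one byte past the end of the authenticated data, so the driver can then test whether the digest's physical address sits exactly there (i.e. the digest lies inside the region being ciphered). Below is a minimal standalone sketch of that walk, assuming only DPDK's rte_mbuf API; the helper name auth_data_end_iova is hypothetical and not part of this patch.

#include <rte_mbuf.h>

/*
 * Sketch: return the IOVA one byte past the end of the auth region in an
 * mbuf chain. alignment_adjustment mirrors the patch: the +/- delta that
 * was applied when src_buf_start was aligned away from the exact
 * auth/cipher offset.
 */
static rte_iova_t
auth_data_end_iova(struct rte_mbuf *buf, uint32_t auth_off,
		uint32_t auth_len, int alignment_adjustment)
{
	uint32_t remaining_off = auth_off + auth_len + alignment_adjustment;

	/* Walk the segment chain until remaining_off falls inside the
	 * current segment; the buf->next != NULL guard (added by this
	 * patch) stops at the last segment instead of walking off the
	 * end of the chain when the region ends on a segment boundary.
	 */
	while (remaining_off >= rte_pktmbuf_data_len(buf) &&
			buf->next != NULL) {
		remaining_off -= rte_pktmbuf_data_len(buf);
		buf = buf->next;
	}

	return rte_pktmbuf_iova_offset(buf, remaining_off);
}

The digest-encrypted path is then taken only when the auth region ends before the cipher region does and op->sym->auth.digest.phys_addr equals the address this walk returns.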