/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
*/
#include <openssl/evp.h>
#include "qat_sym.h"
+
/** Decrypt a single partial block
* Depends on openssl libcrypto
 * Uses ECB+XOR to implement CFB, same result, better performance
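+ * (for a single block, CFB decryption computes P = E(K, IV) ^ C, so one
+ * ECB encrypt of the IV followed by an XOR yields the same plaintext)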
uint32_t auth_len = 0, auth_ofs = 0;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
- uint64_t digest_start = 0;
+ uint64_t auth_data_end = 0;
uint8_t do_sgl = 0;
uint8_t in_place = 1;
+ int alignment_adjustment = 0;
struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
struct qat_sym_op_cookie *cookie =
(struct qat_sym_op_cookie *)op_cookie;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
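+ /* (fixed offset used above: the single-pass spc_* fields extend
+  * cipher_param, so sizeof(*cipher_param) would overshoot the hash
+  * request parameters in the firmware descriptor)
+  */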
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
min_ofs);
}
dst_buf_start = src_buf_start;
+
+ /* remember any alignment adjustment for later; note it can be +/- */
+ alignment_adjustment = src_buf_start -
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
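+ /* (applied below when walking an SGL chain to find the end of the
+  * auth data)
+  */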
}
if (do_cipher || do_aead) {
: (auth_param->auth_off + auth_param->auth_len);
if (do_auth && do_cipher) {
+ /* Handle digest-encrypted cases, i.e.
+ * auth-gen-then-cipher-encrypt and
+ * cipher-decrypt-then-auth-verify
+ */
+ /* First find the end of the data */
if (do_sgl) {
uint32_t remaining_off = auth_param->auth_off +
- auth_param->auth_len;
+ auth_param->auth_len + alignment_adjustment;
struct rte_mbuf *sgl_buf =
(in_place ?
- op->sym->m_src : op->sym->m_dst);
- while (remaining_off >= rte_pktmbuf_data_len(
- sgl_buf)) {
- remaining_off -= rte_pktmbuf_data_len(
- sgl_buf);
+ op->sym->m_src : op->sym->m_dst);
+
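+ /* walk the chain to the segment that holds the end of the auth
+  * data, stopping at the last segment so the walk never runs past
+  * the end of the list
+  */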
+ while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+ && sgl_buf->next != NULL) {
+ remaining_off -= rte_pktmbuf_data_len(sgl_buf);
sgl_buf = sgl_buf->next;
}
- digest_start = (uint64_t)rte_pktmbuf_iova_offset(
+
+ auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
sgl_buf, remaining_off);
} else {
- digest_start = (in_place ?
+ auth_data_end = (in_place ?
src_buf_start : dst_buf_start) +
auth_param->auth_off + auth_param->auth_len;
}
- /* Handle cases of auth-gen-then-cipher and
- * cipher-decrypt-then-auth-verify with digest encrypted
- */
+ /* Then check if digest-encrypted conditions are met */
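+ /* (the auth region must end inside the cipher region and the digest
+  * must sit immediately after the auth data)
+  */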
if ((auth_param->auth_off + auth_param->auth_len <
cipher_param->cipher_offset +
cipher_param->cipher_length) &&
(op->sym->auth.digest.phys_addr ==
- digest_start)) {
+ auth_data_end)) {
/* Handle partial digest encryption */
if (cipher_param->cipher_offset +
cipher_param->cipher_length <
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
+ /* Handle Single-Pass GCM */
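+ /* (single-pass carries the AAD and digest addresses directly in the
+  * cipher request parameters)
+  */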
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+ cipher_param->spc_auth_res_addr =
+ op->sym->aead.digest.phys_addr;
+ }
+
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));