/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
*/
#include <openssl/evp.h>
#include "qat_sym.h"
+
/** Decrypt a single partial block
* Depends on openssl libcrypto
* Uses ECB+XOR to do CFB decryption; same result, more performant
uint32_t auth_len = 0, auth_ofs = 0;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint64_t auth_data_end = 0;
uint8_t do_sgl = 0;
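+ /* in_place is cleared for out-of-place operations; alignment_adjustment
+ * records any shift of the DMA start address and is applied later when
+ * locating the end of the auth data for the digest-encrypted check
+ */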
+ uint8_t in_place = 1;
+ int alignment_adjustment = 0;
struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
struct qat_sym_op_cookie *cookie =
(struct qat_sym_op_cookie *)op_cookie;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
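+ /* The hash request parameters sit at a fixed offset within the
+ * request, not immediately after the cipher request parameters
+ */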
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
if (unlikely(
- (cipher_param->cipher_length % BYTE_LENGTH != 0)
- || (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
QAT_DP_LOG(ERR,
"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
- || (auth_param->auth_len % BYTE_LENGTH != 0))) {
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
QAT_DP_LOG(ERR,
"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
* Don't align DMA start. DMA the minimum data-set
* so as not to overwrite data in dest buffer
*/
+ in_place = 0;
src_buf_start =
rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
dst_buf_start =
min_ofs);
}
dst_buf_start = src_buf_start;
+
+ /* Remember any alignment adjustment for later; note it can be +/- */
+ alignment_adjustment = src_buf_start -
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
}
if (do_cipher || do_aead) {
(cipher_param->cipher_offset + cipher_param->cipher_length)
: (auth_param->auth_off + auth_param->auth_len);
+ if (do_auth && do_cipher) {
+ /* Handle digest-encrypted cases, i.e.
+ * auth-gen-then-cipher-encrypt and
+ * cipher-decrypt-then-auth-verify
+ */
+ /* First find the end of the data */
+ if (do_sgl) {
+ uint32_t remaining_off = auth_param->auth_off +
+ auth_param->auth_len + alignment_adjustment;
+ struct rte_mbuf *sgl_buf =
+ (in_place ?
+ op->sym->m_src : op->sym->m_dst);
+
+ while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+ && sgl_buf->next != NULL) {
+ remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+ sgl_buf = sgl_buf->next;
+ }
+
+ auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
+ sgl_buf, remaining_off);
+ } else {
+ auth_data_end = (in_place ?
+ src_buf_start : dst_buf_start) +
+ auth_param->auth_off + auth_param->auth_len;
+ }
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (op->sym->auth.digest.phys_addr ==
+ auth_data_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
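+ /* Tell the firmware the digest is located within the
+ * data buffer, appended after the auth region, rather
+ * than at a separate address
+ */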
+ struct icp_qat_fw_comn_req_hdr *header =
+ &qat_req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ }
+
if (do_sgl) {
ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &cookie->qat_sgl_src,
- qat_req->comn_mid.src_length);
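+ /* the SGL start is passed as an offset from the mbuf's
+ * own IOVA rather than as an absolute address
+ */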
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
if (unlikely(ret)) {
QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
cookie->qat_sgl_src_phys_addr;
else {
ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &cookie->qat_sgl_dst,
- qat_req->comn_mid.dst_length);
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
if (unlikely(ret)) {
QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
qat_req->comn_mid.dest_data_addr =
cookie->qat_sgl_dst_phys_addr;
}
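+ /* with an SGL the data lengths come from the SGL entries,
+ * so the flat src/dst lengths are cleared
+ */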
+ qat_req->comn_mid.src_length = 0;
+ qat_req->comn_mid.dst_length = 0;
} else {
qat_req->comn_mid.src_data_addr = src_buf_start;
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
+ /* Handle Single-Pass GCM */
+ if (ctx->is_single_pass) {
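+ /* for single-pass requests the AAD and digest addresses
+ * are carried in the cipher request parameters
+ */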
+ cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+ cipher_param->spc_auth_res_addr =
+ op->sym->aead.digest.phys_addr;
+ }
+
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));