/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
*/
+#include <openssl/evp.h>
+
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
-#include <openssl/evp.h>
-
-#include "qat_logs.h"
-#include "qat_sym_session.h"
#include "qat_sym.h"
-#include "qat_qp.h"
-#include "adf_transport_access_macros.h"
-#include "qat_device.h"
-
-#define BYTE_LENGTH 8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-/** Encrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt the IV, then XOR this with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_encrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-cipher_encrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
- return -EINVAL;
-}
/** Decrypt a single partial block
* Depends on openssl libcrypto
return 0;
cipher_decrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
return -EINVAL;
}
-/** Creates a context in either AES or DES in ECB mode
- * Depends on openssl libcrypto
- */
static inline uint32_t
qat_bpicipher_preprocess(struct qat_sym_session *ctx,
last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
uint8_t *, last_block_offset);
- if (unlikely(sym_op->m_dst != NULL))
+ if (unlikely((sym_op->m_dst != NULL)
+ && (sym_op->m_dst != sym_op->m_src)))
/* out-of-place operation (OOP) */
dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
uint8_t *, last_block_offset);
iv = rte_crypto_op_ctod_offset(op, uint8_t *,
ctx->cipher_iv.offset);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
- last_block_len);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
- last_block_len);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
+ dst, last_block_len);
#endif
bpi_cipher_decrypt(last_block, dst, iv, block_len,
last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
- last_block_len);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
- last_block_len);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
#endif
}
return sym_op->cipher.data.length - last_block_len;
}
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_sym_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len > 0 &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
- /* Encrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset;
-
- last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = dst - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src before post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before post-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_encrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src after post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after post-process:", dst,
- last_block_len);
-#endif
- }
- return sym_op->cipher.data.length - last_block_len;
-}
-
-uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-static int
-qat_sym_process_response(void **op, uint8_t *resp,
- __rte_unused void *op_cookie,
- __rte_unused enum qat_device_gen qat_dev_gen)
-{
-
- struct icp_qat_fw_comn_resp *resp_msg =
- (struct icp_qat_fw_comn_resp *)resp;
- struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque_data);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
-#endif
-
- if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
- resp_msg->comn_hdr.comn_status)) {
-
- rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- struct qat_sym_session *sess = (struct qat_sym_session *)
- get_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
-
- if (sess->bpi_ctx)
- qat_bpicipher_postprocess(sess, rx_op);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- }
- *op = (void *)rx_op;
-
- return 0;
-}
-
-
-uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
struct icp_qat_fw_la_cipher_req_params *cipher_param,
iv_length);
}
-static int
+/** Handle Single-Pass AES-GMAC on QAT GEN3 */
+static inline void
+handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
+ struct qat_sym_op_cookie *cookie,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
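+	/* Offset of the cipher key within the session content descriptor
+	 * when verifying: skip the auth setup block, GHASH state, H value,
+	 * len(A) and E(K,0) blocks, and the cipher config word.
+	 */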
+ static const uint32_t ver_key_offset =
+ sizeof(struct icp_qat_hw_auth_setup) +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+ sizeof(struct icp_qat_hw_cipher_config);
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
+ (void *) &qat_req->cd_ctrl;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param =
+ (void *) &qat_req->serv_specif_rqpars;
+ uint32_t data_length = op->sym->auth.data.length;
+
+ /* Fill separate Content Descriptor for this op */
+ rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+ ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+ ctx->cd.cipher.key :
+ RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+ ctx->auth_key_length);
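+	/* Build an AEAD-mode cipher config: the digest length is packed
+	 * into the hash-compare-length field and the data length into the
+	 * upper config word.
+	 */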
+ cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+ ICP_QAT_HW_CIPHER_AEAD_MODE,
+ ctx->qat_cipher_alg,
+ ICP_QAT_HW_CIPHER_NO_CONVERT,
+ (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+ ICP_QAT_HW_CIPHER_ENCRYPT :
+ ICP_QAT_HW_CIPHER_DECRYPT));
+ QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+ ctx->digest_length,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+ cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
+
+ /* Update the request */
+ qat_req->cd_pars.u.s.content_desc_addr =
+ cookie->opt.spc_gmac.cd_phys_addr;
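+	/* Content descriptor params size is given in quad words */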
+ qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+ sizeof(struct icp_qat_hw_cipher_config) +
+ ctx->auth_key_length, 8) >> 3;
+ qat_req->comn_mid.src_length = data_length;
+ qat_req->comn_mid.dst_length = 0;
+
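+	/* The whole source is authenticated as AAD; nothing is enciphered */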
+ cipher_param->spc_aad_addr = 0;
+ cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
+ cipher_param->spc_aad_sz = data_length;
+ cipher_param->reserved = 0;
+ cipher_param->spc_auth_res_sz = ctx->digest_length;
+
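+	/* Issue as a cipher-only command with the single-pass proto flag */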
+ qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+ cipher_cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+ ICP_QAT_FW_LA_PROTO_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+}
+
+int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
void *op_cookie, enum qat_device_gen qat_dev_gen)
{
int ret = 0;
- struct qat_sym_session *ctx;
+ struct qat_sym_session *ctx = NULL;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
uint32_t auth_len = 0, auth_ofs = 0;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint64_t auth_data_end = 0;
uint8_t do_sgl = 0;
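+	/* Track buffer placement and any shift of the DMA start address;
+	 * needed later to locate the digest in digest-encrypted cases */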
+ uint8_t in_place = 1;
+ int alignment_adjustment = 0;
+ int oop_shift = 0;
struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
struct qat_sym_op_cookie *cookie =
(struct qat_sym_op_cookie *)op_cookie;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
"operation requests, op (%p) is not a "
"symmetric operation.", op);
return -EINVAL;
}
-#endif
+
if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
+ QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
+ } else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ op->sym->session, qat_sym_driver_id);
+#ifdef RTE_LIB_SECURITY
+ } else {
+ ctx = (struct qat_sym_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ if (likely(ctx)) {
+ if (unlikely(ctx->bpi_ctx == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports security"
+ " operation requests for"
+ " DOCSIS, op (%p) is not for"
+ " DOCSIS.", op);
+ return -EINVAL;
+ } else if (unlikely(((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src)) ||
+ op->sym->m_src->nb_segs > 1)) {
+ QAT_DP_LOG(ERR, "OOP and/or multi-segment"
+ " buffers not supported for"
+ " DOCSIS security.");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ }
+#endif
}
- ctx = (struct qat_sym_session *)get_session_private_data(
- op->sym->session, cryptodev_qat_driver_id);
-
if (unlikely(ctx == NULL)) {
- PMD_DRV_LOG(ERR, "Session was not created for this device");
+ QAT_DP_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
- PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
+ QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
return -EINVAL;
}
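+	/* Start from the request template pre-built at session init */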
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
- if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+ !ctx->is_gmac) {
/* AES-GCM or AES-CCM */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
do_auth = 1;
do_cipher = 1;
}
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
do_auth = 1;
do_cipher = 0;
} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
if (unlikely(
- (cipher_param->cipher_length % BYTE_LENGTH != 0)
- || (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
cipher_ofs = op->sym->cipher.data.offset >> 3;
} else if (ctx->bpi_ctx) {
- /* DOCSIS - only send complete blocks to device
+ /* DOCSIS - only send complete blocks to device.
* Process any partial block using CFB mode.
* Even if 0 complete blocks, still send this to device
* to get into rx queue for post-process and dequeuing
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
- || (auth_param->auth_len % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
auth_param->u1.aad_adr = 0;
auth_param->u2.aad_sz = 0;
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->auth_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-
- }
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
}
min_ofs = auth_ofs;
- if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
+ ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
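+		/* Only null-auth generate can omit the digest address */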
auth_param->auth_res_addr =
op->sym->auth.digest.phys_addr;
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->cipher_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
+
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
min_ofs = op->sym->aead.data.offset;
}
- if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ if (op->sym->m_src->nb_segs > 1 ||
+ (op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
do_sgl = 1;
/* adjust for chain case */
if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
min_ofs = 0;
- if (unlikely(op->sym->m_dst != NULL)) {
+ if (unlikely((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst != op->sym->m_src))) {
/* Out-of-place operation (OOP)
* Don't align DMA start. DMA the minimum data-set
* so as not to overwrite data in dest buffer
*/
+ in_place = 0;
src_buf_start =
rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
dst_buf_start =
rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
+ oop_shift = min_ofs;
} else {
/* In-place operation
min_ofs);
}
dst_buf_start = src_buf_start;
+
+ /* remember any adjustment for later, note, can be +/- */
+ alignment_adjustment = src_buf_start -
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
}
if (do_cipher || do_aead) {
cipher_param->cipher_length = 0;
}
- if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
+ if (!ctx->is_single_pass) {
+		/* Auth params overlay spc_aad; only set them when not single-pass */
+ if (do_auth || do_aead) {
+ auth_param->auth_off =
+ (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
- auth_param->auth_len = auth_len;
- } else {
- auth_param->auth_off = 0;
- auth_param->auth_len = 0;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
}
qat_req->comn_mid.dst_length =
(cipher_param->cipher_offset + cipher_param->cipher_length)
: (auth_param->auth_off + auth_param->auth_len);
+ if (do_auth && do_cipher) {
+ /* Handle digest-encrypted cases, i.e.
+ * auth-gen-then-cipher-encrypt and
+ * cipher-decrypt-then-auth-verify
+ */
+ /* First find the end of the data */
+ if (do_sgl) {
+ uint32_t remaining_off = auth_param->auth_off +
+ auth_param->auth_len + alignment_adjustment + oop_shift;
+ struct rte_mbuf *sgl_buf =
+ (in_place ?
+ op->sym->m_src : op->sym->m_dst);
+
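+			/* Walk the chain to the segment holding the last
+			 * byte of the auth data */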
+ while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+ && sgl_buf->next != NULL) {
+ remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+ sgl_buf = sgl_buf->next;
+ }
+
+ auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
+ sgl_buf, remaining_off);
+ } else {
+ auth_data_end = (in_place ?
+ src_buf_start : dst_buf_start) +
+ auth_param->auth_off + auth_param->auth_len;
+ }
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (op->sym->auth.digest.phys_addr ==
+ auth_data_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &qat_req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ }
+
if (do_sgl) {
ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &cookie->qat_sgl_src,
- qat_req->comn_mid.src_length);
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
return ret;
}
- if (likely(op->sym->m_dst == NULL))
+ if (in_place)
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
cookie->qat_sgl_src_phys_addr;
else {
ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &cookie->qat_sgl_dst,
- qat_req->comn_mid.dst_length);
-
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot "
- "fill sgl array");
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
return ret;
}
qat_req->comn_mid.dest_data_addr =
cookie->qat_sgl_dst_phys_addr;
}
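+		/* With SGL, buffer lengths are carried in the SGL entries */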
+ qat_req->comn_mid.src_length = 0;
+ qat_req->comn_mid.dst_length = 0;
} else {
qat_req->comn_mid.src_data_addr = src_buf_start;
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "qat_req:", qat_req,
+ if (ctx->is_single_pass) {
+ if (ctx->is_ucs) {
+ /* GEN 4 */
+ cipher_param20->spc_aad_addr =
+ op->sym->aead.aad.phys_addr;
+ cipher_param20->spc_auth_res_addr =
+ op->sym->aead.digest.phys_addr;
+ } else {
+ cipher_param->spc_aad_addr =
+ op->sym->aead.aad.phys_addr;
+ cipher_param->spc_auth_res_addr =
+ op->sym->aead.digest.phys_addr;
+ }
+ } else if (ctx->is_single_pass_gmac &&
+ op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
+ /* Handle Single-Pass AES-GMAC */
+ handle_spc_gmac(ctx, op, cookie, qat_req);
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
- rte_hexdump(stdout, "src_data:",
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
rte_pktmbuf_data_len(op->sym->m_src));
if (do_cipher) {
uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
uint8_t *,
ctx->cipher_iv.offset);
- rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
ctx->cipher_iv.length);
}
uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
uint8_t *,
ctx->auth_iv.offset);
- rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
ctx->auth_iv.length);
}
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
ctx->digest_length);
}
if (do_aead) {
- rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
ctx->digest_length);
- rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
ctx->aad_len);
}
#endif
return 0;
}
-
-
-static void qat_stats_get(struct qat_pci_device *dev,
- struct qat_common_stats *stats,
- enum qat_service_type service)
-{
- int i;
- struct qat_qp **qp;
-
- if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
- PMD_DRV_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
- stats, dev, service);
- return;
- }
-
- qp = dev->qps_in_use[service];
- for (i = 0; i < ADF_MAX_QPS_PER_BUNDLE; i++) {
- if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Service %d Uninitialised qp %d",
- service, i);
- continue;
- }
-
- stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.dequeued_count;
- stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
- }
-}
-
-void qat_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats)
-{
- struct qat_common_stats qat_stats = {0};
- struct qat_sym_dev_private *qat_priv;
-
- if (stats == NULL || dev == NULL) {
- PMD_DRV_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
- return;
- }
- qat_priv = dev->data->dev_private;
-
- qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
- stats->enqueued_count = qat_stats.enqueued_count;
- stats->dequeued_count = qat_stats.dequeued_count;
- stats->enqueue_err_count = qat_stats.enqueue_err_count;
- stats->dequeue_err_count = qat_stats.dequeue_err_count;
-}
-
-static void qat_stats_reset(struct qat_pci_device *dev,
- enum qat_service_type service)
-{
- int i;
- struct qat_qp **qp;
-
- if (dev == NULL || service >= QAT_SERVICE_INVALID) {
- PMD_DRV_LOG(ERR, "invalid param: dev %p, service %d",
- dev, service);
- return;
- }
-
- qp = dev->qps_in_use[service];
- for (i = 0; i < ADF_MAX_QPS_PER_BUNDLE; i++) {
- if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Service %d Uninitialised qp %d",
- service, i);
- continue;
- }
- memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
- }
-
- PMD_DRV_LOG(DEBUG, "QAT crypto: %d stats cleared", service);
-}
-
-void qat_sym_stats_reset(struct rte_cryptodev *dev)
-{
- struct qat_sym_dev_private *qat_priv;
-
- if (dev == NULL) {
- PMD_DRV_LOG(ERR, "invalid cryptodev ptr %p", dev);
- return;
- }
- qat_priv = dev->data->dev_private;
-
- qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);
-
-}
-
-int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
-{
- struct qat_sym_dev_private *qat_private = dev->data->dev_private;
-
- PMD_DRV_LOG(DEBUG, "Release sym qp %u on device %d",
- queue_pair_id, dev->data->dev_id);
-
- qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
- = NULL;
-
- return qat_qp_release((struct qat_qp **)
- &(dev->data->queue_pairs[queue_pair_id]));
-}
-
-int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id, struct rte_mempool *session_pool __rte_unused)
-{
- struct qat_qp *qp;
- int ret = 0;
- uint32_t i;
- struct qat_qp_config qat_qp_conf;
-
- struct qat_qp **qp_addr =
- (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
- struct qat_sym_dev_private *qat_private = dev->data->dev_private;
- const struct qat_qp_hw_data *sym_hw_qps =
- qp_gen_config[qat_private->qat_dev->qat_dev_gen]
- .qp_hw_data[QAT_SERVICE_SYMMETRIC];
- const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
-
- /* If qp is already in use free ring memory and qp metadata. */
- if (*qp_addr != NULL) {
- ret = qat_sym_qp_release(dev, qp_id);
- if (ret < 0)
- return ret;
- }
- if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
- PMD_DRV_LOG(ERR, "qp_id %u invalid for this device", qp_id);
- return -EINVAL;
- }
-
- qat_qp_conf.hw = qp_hw_data;
- qat_qp_conf.build_request = qat_sym_build_request;
- qat_qp_conf.process_response = qat_sym_process_response;
- qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
- qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
- qat_qp_conf.socket_id = socket_id;
- qat_qp_conf.service_str = "sym";
-
- ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
- if (ret != 0)
- return ret;
-
- /* store a link to the qp in the qat_pci_device */
- qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id]
- = *qp_addr;
-
- qp = (struct qat_qp *)*qp_addr;
-
- for (i = 0; i < qp->nb_descriptors; i++) {
-
- struct qat_sym_op_cookie *sql_cookie =
- qp->op_cookies[i];
-
- sql_cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_sym_op_cookie,
- qat_sgl_src);
-
- sql_cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_sym_op_cookie,
- qat_sgl_dst);
- }
-
- return ret;
-}