* ZUC EEA3/EIA3 is not supported by dh895xcc devices
* The maximum additional authenticated data (AAD) length for GCM is 240 bytes; the AAD must be passed to the device in a buffer rounded up to the nearest block-size multiple (x16) and padded with zeros (see the AAD padding sketch after this list).
* Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported).
+* A GCM limitation exists, but only in the case where there are multiple
+ generations of QAT devices on a single platform.
+ To optimise performance, the GCM crypto session should be initialised for the
+ device generation to which the ops will be enqueued. Specifically, if a GCM
+ session is initialised on a GEN2 device but then attached to an op enqueued
+ to a GEN3 device, it will work but cannot take advantage of the hardware
+ optimisations in the GEN3 device. Conversely, if a GCM session is initialised
+ on a GEN3 device and then attached to an op sent to a GEN1/GEN2 device, the
+ op will not be enqueued to the device and will be marked as failed. The
+ simplest way to mitigate this is to use the BDF whitelist to avoid mixing
+ devices of different generations in the same process when planning to use
+ GCM (see the whitelist sketch after this list).
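The whitelist mitigation above is applied at EAL initialisation time. The sketch below is illustrative only and not part of the patch: the BDF addresses and the init_eal_gen3_only() helper are hypothetical. It restricts the process to GEN3 QAT VFs so that GCM sessions are always created and used on the same device generation.

#include <rte_common.h>
#include <rte_eal.h>

int
init_eal_gen3_only(void)
{
	char *eal_args[] = {
		"app",
		"-w", "0000:3d:01.0",	/* hypothetical GEN3 QAT VF */
		"-w", "0000:3d:01.1",	/* hypothetical GEN3 QAT VF */
	};

	/*
	 * Only the whitelisted GEN3 VFs are probed; GEN1/GEN2 QAT devices on
	 * the same platform stay invisible to this process, so GCM sessions
	 * cannot cross generations.
	 */
	return rte_eal_init(RTE_DIM(eal_args), eal_args);
}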
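For the AAD rule earlier in this list, the sketch below shows one way an application might size and zero-pad the GCM AAD buffer before attaching it to an op. It is illustrative only; the alloc_padded_aad() helper and the GCM_AAD_MAX_LEN macro are assumptions, not APIs from this patch.

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>

#define GCM_AAD_MAX_LEN 240	/* QAT AAD limit quoted above */

static uint8_t *
alloc_padded_aad(const uint8_t *aad, uint32_t aad_len)
{
	/* Round the buffer size up to the next 16-byte multiple */
	uint32_t padded_len = RTE_ALIGN_CEIL(aad_len, 16);
	uint8_t *buf;

	if (aad_len > GCM_AAD_MAX_LEN)
		return NULL;

	/* rte_zmalloc() zero-fills, so the (x16) padding is already zeroed */
	buf = rte_zmalloc("gcm_aad", padded_len, 16);
	if (buf != NULL)
		memcpy(buf, aad, aad_len);
	return buf;
}

The op's aead.aad.phys_addr would then be filled from this buffer (e.g. via rte_malloc_virt2iova()), which is outside the scope of this sketch.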
Extra notes on KASUMI F9
~~~~~~~~~~~~~~~~~~~~~~~~
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
*/
#include <openssl/evp.h>
#include "qat_sym.h"
+
/** Decrypt a single partial block
* Depends on openssl libcrypto
* Uses ECB+XOR to do CFB encryption, same result, more performant
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
+ /* Handle Single-Pass GCM */
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+ cipher_param->spc_auth_res_addr =
+ op->sym->aead.digest.phys_addr;
+ }
+
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
break;
case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_sym_session_configure_aead(xform,
+ ret = qat_sym_session_configure_aead(dev, xform,
session);
if (ret < 0)
return ret;
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_sym_session_configure_aead(xform,
+ ret = qat_sym_session_configure_aead(dev, xform,
session);
if (ret < 0)
return ret;
return 0;
}
+static int
+qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
+ struct qat_sym_session *session,
+ struct rte_crypto_aead_xform *aead_xform)
+{
+ enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+
+ if (qat_dev_gen == QAT_GEN3 &&
+ aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
+ /* Use faster Single-Pass GCM */
+ struct icp_qat_fw_la_cipher_req_params *cipher_param =
+ (void *) &session->fw_req.serv_specif_rqpars;
+
+ session->is_single_pass = 1;
+ session->min_qat_dev_gen = QAT_GEN3;
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+ session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
+ session->cipher_iv.offset = aead_xform->iv.offset;
+ session->cipher_iv.length = aead_xform->iv.length;
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data, aead_xform->key.length))
+ return -EINVAL;
+ session->aad_len = aead_xform->aad_length;
+ session->digest_length = aead_xform->digest_length;
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+ ICP_QAT_FW_LA_RET_AUTH_SET(
+ session->fw_req.comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ } else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+ ICP_QAT_FW_LA_CMP_AUTH_SET(
+ session->fw_req.comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ }
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+ session->fw_req.comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+ ICP_QAT_FW_LA_PROTO_SET(
+ session->fw_req.comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ session->fw_req.comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ session->fw_req.comn_hdr.service_cmd_id =
+ ICP_QAT_FW_LA_CMD_CIPHER;
+ session->cd.cipher.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+ ICP_QAT_HW_CIPHER_AEAD_MODE,
+ session->qat_cipher_alg,
+ ICP_QAT_HW_CIPHER_NO_CONVERT,
+ session->qat_dir);
+ QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+ aead_xform->digest_length,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+ session->cd.cipher.cipher_config.reserved =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+ aead_xform->aad_length);
+ cipher_param->spc_aad_sz = aead_xform->aad_length;
+ cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+ }
+ return 0;
+}
+
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
}
int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
struct qat_sym_session *session)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
return -EINVAL;
}
+ session->is_single_pass = 0;
+ if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ /* Use faster Single-Pass GCM if possible */
+ int res = qat_sym_session_handle_single_pass(
+ dev->data->dev_private, session, aead_xform);
+ if (res < 0)
+ return res;
+ if (session->is_single_pass)
+ return 0;
+ }
+
if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
struct icp_qat_fw_la_auth_req_params *auth_param =
(struct icp_qat_fw_la_auth_req_params *)
((char *)&req_tmpl->serv_specif_rqpars +
- sizeof(struct icp_qat_fw_la_cipher_req_params));
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
uint16_t state1_size = 0, state2_size = 0;
uint16_t hash_offset, cd_size;
uint32_t *aad_len = NULL;
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
*/
#ifndef _QAT_SYM_SESSION_H_
#define _QAT_SYM_SESSION_H_
#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
+/* 96-bit case of IV for CCM/GCM single pass algorithm */
+#define QAT_AES_GCM_SPC_IV_SIZE 12
+
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
rte_spinlock_t lock; /* protects this struct */
enum qat_device_gen min_qat_dev_gen;
uint8_t aes_cmac;
+ uint8_t is_single_pass;
};
int
struct rte_crypto_sym_xform *xform, void *session_private);
int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
struct qat_sym_session *session);
int