nb_fw_responses = 1;
if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
- qat_sym_process_response(ops, resp_msg);
+ qat_sym_process_response(ops, resp_msg,
+ tmp_qp->op_cookies[head >> rx_queue->trailz]);
else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
nb_fw_responses = qat_comp_process_response(
ops, resp_msg,
				tmp_qp->op_cookies[head >> rx_queue->trailz],
				&tmp_qp->stats.dequeue_err_count);
}
+/** Handle Single-Pass AES-GMAC on QAT GEN3 */
+static inline void
+handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
+ struct qat_sym_op_cookie *cookie,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
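+	/*
+	 * Offset into the session content descriptor of the key used
+	 * for the verify direction: past the auth setup block, the
+	 * Galois state (state1, H, len(A), E(ctr0)) and a cipher
+	 * config header.
+	 */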
+ static const uint32_t ver_key_offset =
+ sizeof(struct icp_qat_hw_auth_setup) +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+ sizeof(struct icp_qat_hw_cipher_config);
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
+ (void *) &qat_req->cd_ctrl;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param =
+ (void *) &qat_req->serv_specif_rqpars;
+ uint32_t data_length = op->sym->auth.data.length;
+
+ /* Fill separate Content Descriptor for this op */
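+	/* GENERATE ops take the key from the session's cipher CD;
+	 * VERIFY ops take the copy kept at ver_key_offset. */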
+ rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+ ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+ ctx->cd.cipher.key :
+ RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+ ctx->auth_key_length);
+ cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+ ICP_QAT_HW_CIPHER_AEAD_MODE,
+ ctx->qat_cipher_alg,
+ ICP_QAT_HW_CIPHER_NO_CONVERT,
+ (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+ ICP_QAT_HW_CIPHER_ENCRYPT :
+ ICP_QAT_HW_CIPHER_DECRYPT));
+ QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+ ctx->digest_length,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
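+	/* The upper half of the cipher config carries the AAD size;
+	 * for SPC GMAC the whole message is treated as AAD. */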
+ cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
+
+ /* Update the request */
+ qat_req->cd_pars.u.s.content_desc_addr =
+ cookie->opt.spc_gmac.cd_phys_addr;
+ qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+ sizeof(struct icp_qat_hw_cipher_config) +
+ ctx->auth_key_length, 8) >> 3;
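+	/* Auth-only op: the message is consumed as AAD and no cipher
+	 * text is produced, hence the zero destination length. */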
+ qat_req->comn_mid.src_length = data_length;
+ qat_req->comn_mid.dst_length = 0;
+
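+	/* No separate AAD buffer; the digest is written to (GENERATE)
+	 * or compared against (VERIFY) spc_auth_res_addr. */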
+ cipher_param->spc_aad_addr = 0;
+ cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
+ cipher_param->spc_aad_sz = data_length;
+ cipher_param->reserved = 0;
+ cipher_param->spc_auth_res_sz = ctx->digest_length;
+
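+	/* Plain CIPHER command, routed cipher slice -> DRAM write,
+	 * with the single-pass proto flag and no other protocol. */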
+ qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+ cipher_cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+ ICP_QAT_FW_LA_PROTO_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+}
+
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
void *op_cookie, enum qat_device_gen qat_dev_gen)
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
- /* Handle Single-Pass GCM */
if (ctx->is_single_pass) {
+ /* Handle Single-Pass GCM */
cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
cipher_param->spc_auth_res_addr =
op->sym->aead.digest.phys_addr;
+ } else if (ctx->is_single_pass_gmac &&
+ op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
+ /* Handle Single-Pass AES-GMAC */
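+		/* Messages above the SPC size cap stay on the
+		 * legacy GMAC auth path. */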
+ handle_spc_gmac(ctx, op, cookie, qat_req);
}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
*/
#define QAT_SYM_SGL_MAX_NUMBER 16
+/* Maximum data length for single pass GMAC: 2^14-1 */
+#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+
struct qat_sym_session;
	struct qat_sym_sgl qat_sgl_src;
struct qat_sym_sgl qat_sgl_dst;
phys_addr_t qat_sgl_src_phys_addr;
phys_addr_t qat_sgl_dst_phys_addr;
+ union {
+ /* Used for Single-Pass AES-GMAC only */
+ struct {
+ struct icp_qat_hw_cipher_algo_blk cd_cipher
+ __rte_packed __rte_cache_aligned;
+ phys_addr_t cd_phys_addr;
+ } spc_gmac;
+ } opt;
};
int
#endif
static inline void
-qat_sym_process_response(void **op, uint8_t *resp)
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
{
struct icp_qat_fw_comn_resp *resp_msg =
(struct icp_qat_fw_comn_resp *)resp;
struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
(resp_msg->opaque_data);
struct qat_sym_session *sess;
+ uint8_t is_docsis_sec;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
sizeof(struct icp_qat_fw_comn_resp));
#endif
+#ifdef RTE_LIB_SECURITY
+ if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ /*
+ * Assuming at this point that if it's a security
+ * op, that this is for DOCSIS
+ */
+ sess = (struct qat_sym_session *)
+ get_sec_session_private_data(
+ rx_op->sym->sec_session);
+ is_docsis_sec = 1;
+ } else
+#endif
+ {
+ sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ qat_sym_driver_id);
+ is_docsis_sec = 0;
+ }
+
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
-#ifdef RTE_LIB_SECURITY
- uint8_t is_docsis_sec = 0;
-
- if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
- /*
- * Assuming at this point that if it's a security
- * op, that this is for DOCSIS
- */
- sess = (struct qat_sym_session *)
- get_sec_session_private_data(
- rx_op->sym->sec_session);
- is_docsis_sec = 1;
- } else
-#endif
- {
- sess = (struct qat_sym_session *)
- get_sym_session_private_data(
- rx_op->sym->session,
- qat_sym_driver_id);
- }
-
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (sess->bpi_ctx) {
#endif
}
}
+
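+	/* The GMAC key was copied into the per-op cookie at enqueue
+	 * time (see handle_spc_gmac); wipe that copy now. */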
+ if (sess->is_single_pass_gmac) {
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *) op_cookie;
+ memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
+ sess->auth_key_length);
+ }
+
*op = (void *)rx_op;
}
}
static inline void
-qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+ void *op_cookie __rte_unused)
{
}
rte_mempool_virt2iova(cookie) +
offsetof(struct qat_sym_op_cookie,
qat_sgl_dst);
+
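+			/* IOVA of the per-op cipher CD used by
+			 * single-pass GMAC */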
+ cookie->opt.spc_gmac.cd_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ opt.spc_gmac.cd_cipher);
+
}
/* Get fw version from QAT (GEN2), skip if we've got it already */
struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_sym_session *session = session_private;
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
int ret;
int qat_cmd_id;
ret = qat_sym_session_configure_auth(dev, xform, session);
if (ret < 0)
return ret;
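+		/* Single-pass GMAC needs GEN3 and the 96-bit (J0) IV */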
+ session->is_single_pass_gmac =
+ qat_dev_gen == QAT_GEN3 &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
+ xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
break;
case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
struct qat_sym_dev_private *internals = dev->data->dev_private;
const uint8_t *key_data = auth_xform->key.data;
uint8_t key_length = auth_xform->key.length;
	session->aes_cmac = 0;
+ session->auth_key_length = auth_xform->key.length;
session->auth_iv.offset = auth_xform->iv.offset;
session->auth_iv.length = auth_xform->iv.length;
session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
if (session->auth_iv.length == 0)
session->auth_iv.length = AES_GCM_J0_LEN;
-
break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
/* 96-bit case of IV for CCM/GCM single pass algorithm */
#define QAT_AES_GCM_SPC_IV_SIZE 12
-
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
ICP_QAT_HW_CIPHER_NO_CONVERT, \
uint16_t offset;
uint16_t length;
} auth_iv;
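+	/* Key length kept for single-pass GMAC CD setup and wipe */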
+ uint16_t auth_key_length;
uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
enum qat_device_gen min_qat_dev_gen;
uint8_t aes_cmac;
uint8_t is_single_pass;
+ uint8_t is_single_pass_gmac;
};
int