/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
+#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>
#include "qat_logs.h"
/* Get cipher xform from crypto xform chain */
cipher_xform = qat_get_cipher_xform(xform);
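+ /* IV is carried inside the crypto op; record its offset and length
+  * in the session so per-op code can locate it */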
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
switch (cipher_xform->algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
if (qat_alg_validate_aes_key(cipher_xform->key.length,
}
auth_xform = qat_get_auth_xform(xform);
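+ /* likewise record the auth IV's offset within the op and its length */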
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
(session->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
iv = last_block - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
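+ /* resolve the IV's virtual address from the offset
+ * kept in the session */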
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
iv = dst - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
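+ /* as in pre-process: IV address comes from the session offset */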
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "BPI: src before post-process:", last_block,
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
uint8_t do_sgl = 0;
-
+ uint8_t *cipher_iv_ptr = NULL;
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
return -EINVAL;
}
#endif
- if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
cipher_ofs = op->sym->cipher.data.offset;
}
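+ /* virtual address of the cipher IV within the crypto op */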
+ cipher_iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
/* copy IV into request if it fits */
- /*
- * If IV length is zero do not copy anything but still
- * use request descriptor embedded IV
- *
- */
- if (op->sym->cipher.iv.length) {
- if (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- op->sym->cipher.iv.phys_addr;
- }
+ if (ctx->cipher_iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ } else {
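+ /* IV too large for the embedded IV array: switch the descriptor
+ * to 64-bit IV pointer mode and pass the IV's physical address */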
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
}
min_ofs = cipher_ofs;
}
auth_len = auth_len + auth_ofs + 1;
auth_ofs = 0;
}
- }
+ } else
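+ /* for these wireless algorithms the auth IV travels in the
+ * descriptor's AAD address field */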
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
auth_ofs = op->sym->cipher.data.offset;
auth_len = op->sym->cipher.data.length;
+
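+ /* for GCM the AAD field carries genuine AAD from the op */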
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
}
min_ofs = auth_ofs;
auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
- auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
-
}
if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- if (op->sym->cipher.iv.length == 12) {
+ if (ctx->cipher_iv.length == 12) {
/*
- * For GCM a 12 bit IV is allowed,
+ * For GCM a 12 byte IV is allowed,
* but we need to inform the f/w
*/
ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
cipher_param->cipher_length = 0;
cipher_param->cipher_offset = 0;
auth_param->u1.aad_adr = 0;
- auth_param->auth_len = op->sym->auth.aad.length;
+ auth_param->auth_len = ctx->aad_len;
auth_param->auth_off = op->sym->auth.data.offset;
auth_param->u2.aad_sz = 0;
}
rte_hexdump(stdout, "src_data:",
rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
rte_pktmbuf_data_len(op->sym->m_src));
- rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- op->sym->auth.digest.length);
- rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
- op->sym->auth.aad.length);
+ if (do_cipher)
+ rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ ctx->aad_len);
+ }
#endif
return 0;
}