#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
-#include <rte_cryptodev_pci.h>
+#include <rte_byteorder.h>
+#include <rte_pci.h>
+
#include <openssl/evp.h>
#include "qat_logs.h"
/* AEAD */
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+	/* AES-GCM and AES-CCM work in opposite orders: GCM first
+	 * encrypts and then generates the hash, whereas AES-CCM
+	 * first generates the hash and then encrypts. The same
+	 * relation applies to decryption.
+	 */
if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
else
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
}
if (xform->next == NULL)
struct qat_session *session)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
/*
* Store AEAD IV parameters as cipher IV,
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
- aead_xform->algo);
- return -ENOTSUP;
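+		/* AES-CCM is handled as AES in CTR mode for the cipher
+		 * combined with AES-CBC-MAC for the hash.
+		 */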
+ if (qat_alg_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
aead_xform->algo);
return -EINVAL;
}
- if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
/*
* It needs to create cipher desc content first,
* then authentication
*/
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
if (qat_alg_aead_session_create_content_desc_cipher(session,
aead_xform->key.data,
aead_xform->key.length))
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_GENERATE))
+ crypto_operation))
return -EINVAL;
} else {
session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
* It needs to create authentication desc content first,
* then cipher
*/
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
if (qat_alg_aead_session_create_content_desc_auth(session,
aead_xform->key.data,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_VERIFY))
+ crypto_operation))
return -EINVAL;
if (qat_alg_aead_session_create_content_desc_cipher(session,
return sym_op->cipher.data.length - last_block_len;
}
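+
+/* Coalesced tail update: write the current tail to the ring CSR, reset
+ * the pending-request counter and remember the value last written to HW.
+ */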
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
+{
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, q->tail);
+ q->nb_pending_requests = 0;
+ q->csr_tail = q->tail;
+}
+
uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
cur_op++;
}
kick_tail:
- WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, tail);
queue->tail = tail;
tmp_qp->stats.enqueued_count += nb_ops_sent;
+ queue->nb_pending_requests += nb_ops_sent;
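+	/* Write the tail CSR only when few requests are in flight or
+	 * enough requests have been queued since the last write;
+	 * otherwise the write is deferred and coalesced.
+	 */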
+ if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+ queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+ txq_write_tail(tmp_qp, queue);
+ }
return nb_ops_sent;
}
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct qat_queue *queue;
+ struct qat_queue *rx_queue, *tx_queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
uint32_t head;
- queue = &(tmp_qp->rx_q);
- head = queue->head;
+ rx_queue = &(tmp_qp->rx_q);
+ tx_queue = &(tmp_qp->tx_q);
+ head = rx_queue->head;
resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)queue->base_addr + head);
+ ((uint8_t *)rx_queue->base_addr + head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_ops) {
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
sizeof(struct icp_qat_fw_comn_resp));
-
#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
- head = adf_modulo(head + queue->msg_size, queue->modulo);
+ head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)queue->base_addr + head);
+ ((uint8_t *)rx_queue->base_addr + head);
*ops = rx_op;
ops++;
msg_counter++;
}
if (msg_counter > 0) {
- queue->head = head;
+ rx_queue->head = head;
tmp_qp->stats.dequeued_count += msg_counter;
- queue->nb_processed_responses += msg_counter;
+ rx_queue->nb_processed_responses += msg_counter;
tmp_qp->inflights16 -= msg_counter;
- if (queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
- rxq_free_desc(tmp_qp, queue);
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+ }
+ /* also check if tail needs to be advanced */
+ if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+ tx_queue->tail != tx_queue->csr_tail) {
+ txq_write_tail(tmp_qp, tx_queue);
}
return msg_counter;
}
}
}
+/** Setting the IV for CCM is a special case: byte 0 is set to q-1,
+ * where q is the size of the length/counter field that pads the nonce
+ * within the 16-byte block.
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
+
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
return -EINVAL;
}
+
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- /* AES-GCM */
+ /* AES-GCM or AES-CCM */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
do_aead = 1;
} else {
do_auth = 1;
}
if (do_aead) {
+		/*
+		 * This address is used as the AAD physical pointer; for
+		 * AES-CCM with no real AAD it is redirected to the IV
+		 * offset within the op.
+		 */
+ phys_addr_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In the case of AES-CCM this may point to user
+			 * supplied memory or to the IV offset in the
+			 * crypto_op.
+			 */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length; it does not include
+			 * the 18 bytes of preceding data (B0 block and AAD
+			 * length field).
+			 */
+ uint8_t aad_ccm_real_len = 0;
+
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+				/*
+				 * aad_len is not greater than 18, so there is
+				 * no actual AAD data; use the IV area after
+				 * the op for the B0 block instead.
+				 */
+ aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+
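+		/* Build the CCM B0 block: the flags byte encodes AAD
+		 * presence, digest length and q; the trailing q bytes hold
+		 * the message length, big endian.
+		 */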
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET
+ + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
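+		/* Real AAD present: prefix it with its 16-bit big-endian
+		 * length and zero-pad the final block to a 16-byte boundary.
+		 */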
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
}
cipher_len = op->sym->aead.data.length;
auth_len = op->sym->aead.data.length;
auth_ofs = op->sym->aead.data.offset;
- auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
min_ofs = op->sym->aead.data.offset;
}