-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#include <stdio.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
-#include <rte_cryptodev_pci.h>
+#include <rte_byteorder.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
#include <openssl/evp.h>
#include "qat_logs.h"
#include "adf_transport_access_macros.h"
#define BYTE_LENGTH 8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be taken as the maximum length for IV, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
{
EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
int encrypted_ivlen;
- uint8_t encrypted_iv[16];
- int i;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
/* ECB method: encrypt the IV, then XOR this with plaintext */
if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
<= 0)
goto cipher_encrypt_err;
- for (i = 0; i < srclen; i++)
- *(dst+i) = *(src+i)^(encrypted_iv[i]);
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
return 0;
{
EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
int encrypted_ivlen;
- uint8_t encrypted_iv[16];
- int i;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
<= 0)
goto cipher_decrypt_err;
- for (i = 0; i < srclen; i++)
- *(dst+i) = *(src+i)^(encrypted_iv[i]);
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
return 0;
cipher_decrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
return -EINVAL;
}
/* AEAD */
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/* AES-GCM and AES-CCM work in different orders:
+		 * GCM first encrypts and then generates the hash, whereas
+		 * AES-CCM first generates the hash and then encrypts.
+		 * A similar relation applies to decryption.
+		 */
if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
else
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
}
if (xform->next == NULL)
PMD_INIT_FUNC_TRACE();
/* Set context descriptor physical address */
- session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
+ session->cd_paddr = rte_mempool_virt2iova(session) +
offsetof(struct qat_session, cd);
session->min_qat_dev_gen = QAT_GEN1;
struct qat_session *session)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
/*
* Store AEAD IV parameters as cipher IV,
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
- aead_xform->algo);
- return -ENOTSUP;
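+		/* CCM is driven as the AES cipher in CTR mode combined
+		 * with the AES-CBC-MAC hash
+		 */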
+ if (qat_alg_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
aead_xform->algo);
return -EINVAL;
}
- if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
/*
* It needs to create cipher desc content first,
* then authentication
*/
+
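+		/* this branch covers GCM encryption (digest generate)
+		 * and CCM decryption (digest verify)
+		 */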
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
if (qat_alg_aead_session_create_content_desc_cipher(session,
aead_xform->key.data,
aead_xform->key.length))
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_GENERATE))
+ crypto_operation))
return -EINVAL;
} else {
session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
* It needs to create authentication desc content first,
* then cipher
*/
+
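+		/* this branch covers GCM decryption (digest verify)
+		 * and CCM encryption (digest generate)
+		 */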
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
if (qat_alg_aead_session_create_content_desc_auth(session,
aead_xform->key.data,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_VERIFY))
+ crypto_operation))
return -EINVAL;
if (qat_alg_aead_session_create_content_desc_cipher(session,
qat_bpicipher_preprocess(struct qat_session *ctx,
struct rte_crypto_op *op)
{
- uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
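+	/* signed, presumably so a negative error return from
+	 * qat_cipher_get_block_size() is not truncated
+	 */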
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
uint8_t last_block_len = block_len > 0 ?
sym_op->cipher.data.length % block_len : 0;
qat_bpicipher_postprocess(struct qat_session *ctx,
struct rte_crypto_op *op)
{
- uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
uint8_t last_block_len = block_len > 0 ?
sym_op->cipher.data.length % block_len : 0;
void *cur_desc = (uint8_t *)q->base_addr + old_head;
if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
- memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
} else {
- memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
}
q->nb_processed_responses = 0;
q->csr_head = new_head;
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
sizeof(struct icp_qat_fw_comn_resp));
-
#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
{
int nr = 1;
- uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ uint32_t buf_len = rte_pktmbuf_iova(buf) -
buff_start + rte_pktmbuf_data_len(buf);
list->bufers[0].addr = buff_start;
list->bufers[nr].len = rte_pktmbuf_data_len(buf);
list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+ list->bufers[nr].addr = rte_pktmbuf_iova(buf);
buf_len += list->bufers[nr].len;
buf = buf->next;
}
}
+/** Setting the IV for CCM is a special case: the 0th byte is set to q - 1,
+ * where q is the number of bytes that pad the nonce out to the 16 byte block
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
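+	/* when AAD is present, also mirror the nonce into the B0 block
+	 * at the start of the AAD buffer
+	 */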
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
+
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
return -EINVAL;
}
+
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- /* AES-GCM */
+ /* AES-GCM or AES-CCM */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
do_aead = 1;
} else {
do_auth = 1;
}
min_ofs = auth_ofs;
- auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
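+		/* the digest result address is not needed for a NULL
+		 * hash algorithm
+		 */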
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
}
if (do_aead) {
+		/*
+		 * This address may later be overridden to point at the IV
+		 * offset inside the op; it is used to set the AAD physical
+		 * pointer
+		 */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+		/* For AES-CCM this may point to user-selected memory
+		 * or to the IV offset in the crypto_op
+		 */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+		/* This is the true AAD length; it does not include the
+		 * 18 bytes of preceding data
+		 */
+ uint8_t aad_ccm_real_len = 0;
+
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+			/*
+			 * aad_len is not greater than 18, so there is no
+			 * actual AAD data; use the IV after the op for the
+			 * B0 block instead
+			 */
+ aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
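+		/* in CCM the nonce and the length field together fill 15
+		 * bytes, so q (the length-field size) is
+		 * ICP_QAT_HW_CCM_NQ_CONST minus the nonce length
+		 */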
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+
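+		/* build the B0 flags byte; per RFC 3610 it encodes the Adata
+		 * bit, the tag length and q - 1 (e.g. a 12 byte nonce, 16 byte
+		 * digest and non-empty AAD would give 0x7a)
+		 */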
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+
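+		/* store the message length big-endian in the last q bytes of
+		 * B0; msg_len_be supplies at most 4 bytes, so it is
+		 * right-aligned when q is larger
+		 */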
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET
+ + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
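+		/* prepend the big-endian AAD length to the AAD data and
+		 * zero-pad the AAD region to a whole 16 byte block
+		 */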
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
}
cipher_len = op->sym->aead.data.length;
auth_len = op->sym->aead.data.length;
auth_ofs = op->sym->aead.data.offset;
- auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
min_ofs = op->sym->aead.data.offset;
}
* so as not to overwrite data in dest buffer
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
dst_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
} else {
/* In-place operation
* Start DMA at nearest aligned address below min_ofs
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
& QAT_64_BTYE_ALIGN_MASK;
- if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
rte_pktmbuf_headroom(op->sym->m_src))
> src_buf_start)) {
/* alignment has pushed addr ahead of start of mbuf
* so revert and take the performance hit
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ rte_pktmbuf_iova_offset(op->sym->m_src,
min_ofs);
}
dst_buf_start = src_buf_start;
if (do_cipher || do_aead) {
cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_mtophys_offset(
+ (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
cipher_param->cipher_length = cipher_len;
} else {
}
if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
} else {