1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2018 Intel Corporation
8 #include <rte_cryptodev_pmd.h>
11 #include <openssl/evp.h>
13 #include "qat_common.h"
14 #include "qat_sym_session.h"
15 #include "qat_sym_pmd.h"
19 /* bpi is only used for partial blocks of DES and AES
20 * so AES block len can be assumed as max len for iv, src and dst
22 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
24 struct qat_sym_session;
/*
 * Per-operation cookie: scatter-gather list buffers (source and
 * destination) plus their physical addresses, used when the QAT
 * hardware needs SGL descriptors for chained mbufs.
 * NOTE(review): interior lines of this chunk are missing; the closing
 * brace of this struct (and any further members) is not visible here.
 */
26 struct qat_sym_op_cookie {
27 struct qat_sgl qat_sgl_src;
28 struct qat_sgl qat_sgl_dst;
29 phys_addr_t qat_sgl_src_phys_addr;
30 phys_addr_t qat_sgl_dst_phys_addr;
/*
 * Build a QAT firmware request message for one crypto operation.
 *
 * @param in_op       crypto op to translate (presumably struct
 *                    rte_crypto_op * — confirm against caller)
 * @param out_msg     buffer receiving the firmware request message
 * @param op_cookie   per-op cookie (struct qat_sym_op_cookie)
 * @param qat_dev_gen QAT device generation the message targets
 *
 * NOTE(review): the return-type line of this prototype is not visible
 * in this chunk of the file.
 */
34 qat_sym_build_request(void *in_op, uint8_t *out_msg,
35 void *op_cookie, enum qat_device_gen qat_dev_gen);
38 /** Encrypt a single partial block
39 * Depends on openssl libcrypto
40 * Uses ECB+XOR to do CFB encryption, same result, more performant
/*
 * NOTE(review): the signature line (return type / qualifiers) of this
 * function is not visible in this chunk, nor are the XOR statement of
 * the loop, the success return, or the cipher_encrypt_err label.
 */
43 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
44 uint8_t *iv, int ivlen, int srclen,
/* bpi_ctx is an opaque handle holding a pre-initialized libcrypto
 * ECB cipher context. */
47 EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
/* Scratch buffer for the ECB-encrypted IV; BPI_MAX_ENCR_IV_LEN is
 * the AES block size, the max block length handled here. */
49 uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
50 uint8_t *encr = encrypted_iv;
52 /* ECB method: encrypt the IV, then XOR this with plaintext */
53 if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
55 goto cipher_encrypt_err;
/* CFB step: walk src/dst byte by byte XOR-ing with the encrypted IV
 * (XOR body not visible in this chunk). */
57 for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
63 QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
/*
 * BPI (block-protocol-interoperability) post-processing for DOCSIS-style
 * ciphers: after the HW has processed the full blocks, software-encrypts
 * the trailing partial block with libcrypto (via bpi_cipher_encrypt).
 * Returns the length of the data handled by hardware, i.e.
 * cipher.data.length minus the trailing partial-block length.
 * NOTE(review): several interior lines (opening brace, else-branches,
 * the in-place dst assignment, #endif lines, closing brace) are not
 * visible in this chunk.
 */
67 static inline uint32_t
68 qat_bpicipher_postprocess(struct qat_sym_session *ctx,
69 struct rte_crypto_op *op)
71 int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
72 struct rte_crypto_sym_op *sym_op = op->sym;
/* Length of the trailing partial block; 0 when data is block-aligned
 * (or block_len could not be determined). */
73 uint8_t last_block_len = block_len > 0 ?
74 sym_op->cipher.data.length % block_len : 0;
/* Only the encrypt direction needs SW post-processing here. */
76 if (last_block_len > 0 &&
77 ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
79 /* Encrypt last block */
80 uint8_t *last_block, *dst, *iv;
81 uint32_t last_block_offset;
83 last_block_offset = sym_op->cipher.data.offset +
84 sym_op->cipher.data.length - last_block_len;
85 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
86 uint8_t *, last_block_offset);
88 if (unlikely(sym_op->m_dst != NULL))
89 /* out-of-place operation (OOP) */
90 dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
91 uint8_t *, last_block_offset);
/* (else branch assigning dst for in-place not visible here) */
95 if (last_block_len < sym_op->cipher.data.length)
96 /* use previous block ciphertext as IV */
99 /* runt block, i.e. less than one full block */
/* IV lives in the op's private area at the session-configured
 * offset. */
100 iv = rte_crypto_op_ctod_offset(op, uint8_t *,
101 ctx->cipher_iv.offset);
103 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
104 QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
105 last_block, last_block_len);
106 if (sym_op->m_dst != NULL)
107 QAT_DP_HEXDUMP_LOG(DEBUG,
108 "BPI: dst before post-process:",
109 dst, last_block_len);
/* SW-encrypt the partial block using the session's libcrypto ctx. */
111 bpi_cipher_encrypt(last_block, dst, iv, block_len,
112 last_block_len, ctx->bpi_ctx);
113 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
114 QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
115 last_block, last_block_len);
116 if (sym_op->m_dst != NULL)
117 QAT_DP_HEXDUMP_LOG(DEBUG,
118 "BPI: dst after post-process:",
119 dst, last_block_len);
/* Hardware handled everything except the trailing partial block. */
122 return sym_op->cipher.data.length - last_block_len;
/*
 * Translate one QAT firmware response into the originating crypto op's
 * status, running BPI post-processing on success paths that need it.
 * The op pointer is recovered from the opaque_data field the request
 * path stored in the firmware descriptor.
 * NOTE(review): the signature/brace lines, #endif lines, the else
 * branch, and the function tails are not visible in this chunk; the
 * second declaration below appears to be the no-op stub variant
 * compiled when the real implementation is configured out.
 */
126 qat_sym_process_response(void **op, uint8_t *resp)
129 struct icp_qat_fw_comn_resp *resp_msg =
130 (struct icp_qat_fw_comn_resp *)resp;
/* opaque_data round-trips the rte_crypto_op pointer through the HW. */
131 struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
132 (resp_msg->opaque_data);
134 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
135 QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
136 sizeof(struct icp_qat_fw_comn_resp));
/* Firmware status flag decides auth-failure vs. success. */
139 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
140 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
141 resp_msg->comn_hdr.comn_status)) {
143 rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Success path: fetch the session to run BPI post-processing
 * (lines selecting this branch are not all visible here). */
145 struct qat_sym_session *sess = (struct qat_sym_session *)
146 get_session_private_data(
148 cryptodev_qat_driver_id);
152 qat_bpicipher_postprocess(sess, rx_op);
153 rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Stub variant: parameters deliberately unused. */
160 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
164 #endif /* _QAT_SYM_H_ */