/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#ifndef _QAT_SYM_H_
#define _QAT_SYM_H_

#include <rte_cryptodev_pmd.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_net_crc.h>
#endif

#ifdef BUILD_QAT_SYM
#include <openssl/evp.h>

#include "qat_common.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"

/* BPI is only used for partial blocks of DES and AES,
 * so the AES block length can be assumed as the max length
 * for iv, src and dst.
 */
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ

/*
 * Maximum number of SGL entries
 */
#define QAT_SYM_SGL_MAX_NUMBER 16

struct qat_sym_session;
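
/* Scatter-gather list used to describe a segmented buffer to the device:
 * a common SGL header followed by up to QAT_SYM_SGL_MAX_NUMBER flat buffers.
 */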
struct qat_sym_sgl {
	qat_sgl_hdr;
	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;
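
/* Per-operation cookie holding the source and destination SGLs for a
 * request, plus their physical addresses.
 */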
struct qat_sym_op_cookie {
	struct qat_sym_sgl qat_sgl_src;
	struct qat_sym_sgl qat_sgl_dst;
	phys_addr_t qat_sgl_src_phys_addr;
	phys_addr_t qat_sgl_dst_phys_addr;
};
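
/* Build a QAT firmware request message in out_msg for the crypto op
 * in_op, using op_cookie for SGL storage.
 */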
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
		void *op_cookie, enum qat_device_gen qat_dev_gen);

/** Encrypt a single partial block.
 *  Depends on openssl libcrypto.
 *  Uses ECB+XOR to do CFB encryption; same result, more performant.
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
	uint8_t *encr = encrypted_iv;

	/* ECB method: encrypt the IV, then XOR this with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_encrypt_err;

	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
		*dst = *src ^ *encr;

	return 0;

cipher_encrypt_err:
	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
	return -EINVAL;
}
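
/* For a single partial block, CFB encryption reduces to
 * C = P XOR E_K(IV), which is exactly what the ECB-encrypt-then-XOR
 * loop above computes.
 */

/* BPI (DOCSIS) post-processing: if the cipher length is not block-aligned
 * and the session direction is encrypt, CFB-encrypt the trailing partial
 * block in software (the preceding full blocks are handled by the device).
 * Returns the byte count of the full-block region.
 */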
static inline uint32_t
qat_bpicipher_postprocess(struct qat_sym_session *ctx,
				struct rte_crypto_op *op)
{
	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
					uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst before post-process:",
				dst, last_block_len);
#endif
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst after post-process:",
				dst, last_block_len);
#endif
	}
	return sym_op->cipher.data.length - last_block_len;
}
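
/* DOCSIS CRC check (rte_security builds only): after decryption, verify
 * the Ethernet CRC-32 that follows the authenticated region and mark the
 * op as auth-failed on mismatch.
 */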
#ifdef RTE_LIBRTE_SECURITY
static inline void
qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint32_t crc_offset, crc_length, crc;
	uint8_t *crc_data;

	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
			sym_op->auth.data.length != 0) {

		crc_offset = sym_op->auth.data.offset;
		crc_length = sym_op->auth.data.length;
		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
				crc_offset);

		crc = rte_net_crc_calc(crc_data, crc_length,
				RTE_NET_CRC32_ETH);

		if (crc != *(uint32_t *)(crc_data + crc_length))
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	}
}
#endif
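
/* Dequeue-path handler: translate the firmware response status into the
 * rte_crypto_op status, look up the session, run BPI/CRC post-processing
 * where required and hand the op back via *op.
 */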
static inline void
qat_sym_process_response(void **op, uint8_t *resp)
{
	struct icp_qat_fw_comn_resp *resp_msg =
			(struct icp_qat_fw_comn_resp *)resp;
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_sym_session *sess;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comn_resp));
#endif

	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
			resp_msg->comn_hdr.comn_status)) {

		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
#ifdef RTE_LIBRTE_SECURITY
		uint8_t is_docsis_sec = 0;

		if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			/*
			 * Assume at this point that any security op
			 * is a DOCSIS op.
			 */
			sess = (struct qat_sym_session *)
					get_sec_session_private_data(
						rx_op->sym->sec_session);
			is_docsis_sec = 1;
		} else
#endif
		{
			sess = (struct qat_sym_session *)
					get_sym_session_private_data(
						rx_op->sym->session,
						qat_sym_driver_id);
		}

		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

		if (sess->bpi_ctx) {
			qat_bpicipher_postprocess(sess, rx_op);
#ifdef RTE_LIBRTE_SECURITY
			if (is_docsis_sec)
				qat_crc_verify(sess, rx_op);
#endif
		}
	}
	*op = (void *)rx_op;
}
#else

static inline void
qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
{
}
#endif /* BUILD_QAT_SYM */
#endif /* _QAT_SYM_H_ */