e3ec7f0de4cc070ff7793c81309477623e57b21f
[dpdk.git] / drivers / crypto / qat / qat_sym.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2018 Intel Corporation
3  */
4
5 #ifndef _QAT_SYM_H_
6 #define _QAT_SYM_H_
7
#include <string.h>

#include <cryptodev_pmd.h>
#ifdef RTE_LIB_SECURITY
#include <rte_net_crc.h>
#endif
12
13 #ifdef BUILD_QAT_SYM
14 #include <openssl/evp.h>
15
16 #include "qat_common.h"
17 #include "qat_sym_session.h"
18 #include "qat_sym_pmd.h"
19 #include "qat_logs.h"
20
21 #define BYTE_LENGTH    8
22 /* bpi is only used for partial blocks of DES and AES
23  * so AES block len can be assumed as max len for iv, src and dst
24  */
25 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
26
27 /*
28  * Maximum number of SGL entries
29  */
30 #define QAT_SYM_SGL_MAX_NUMBER  16
31
32 /* Maximum data length for single pass GMAC: 2^14-1 */
33 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
34
35 struct qat_sym_session;
36
/*
 * Scatter-gather list for a symmetric crypto request.
 * qat_sgl_hdr expands to the common QAT SGL header fields (see
 * qat_common.h); the flat-buffer array follows it directly.
 * Packed and cache aligned — presumably because the structure is
 * handed to the QAT device as-is, so no compiler padding is allowed.
 */
struct qat_sym_sgl {
	qat_sgl_hdr;
	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;
41
/*
 * Per-operation cookie associated with each in-flight descriptor.
 * Holds the source/destination SGLs and their physical (IOVA)
 * addresses for segmented mbufs, plus optional per-mode scratch data.
 */
struct qat_sym_op_cookie {
	struct qat_sym_sgl qat_sgl_src;
	struct qat_sym_sgl qat_sgl_dst;
	phys_addr_t qat_sgl_src_phys_addr;
	phys_addr_t qat_sgl_dst_phys_addr;
	union {
		/* Used for Single-Pass AES-GMAC only */
		struct {
			/* Per-op cipher config block; its key is zeroed in
			 * qat_sym_process_response() after completion.
			 */
			struct icp_qat_hw_cipher_algo_blk cd_cipher
					__rte_packed __rte_cache_aligned;
			phys_addr_t cd_phys_addr;
		} spc_gmac;
	} opt;
};
56
/** Build a QAT firmware request message in @out_msg for crypto op
 *  @in_op, using the descriptor's matching @op_cookie, for hardware
 *  generation @qat_dev_gen.
 */
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
		void *op_cookie, enum qat_device_gen qat_dev_gen);
60
61
62 /** Encrypt a single partial block
63  *  Depends on openssl libcrypto
64  *  Uses ECB+XOR to do CFB encryption, same result, more performant
65  */
66 static inline int
67 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
68                 uint8_t *iv, int ivlen, int srclen,
69                 void *bpi_ctx)
70 {
71         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
72         int encrypted_ivlen;
73         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
74         uint8_t *encr = encrypted_iv;
75
76         /* ECB method: encrypt the IV, then XOR this with plaintext */
77         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
78                                                                 <= 0)
79                 goto cipher_encrypt_err;
80
81         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
82                 *dst = *src ^ *encr;
83
84         return 0;
85
86 cipher_encrypt_err:
87         QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
88         return -EINVAL;
89 }
90
/*
 * DOCSIS BPI post-processing: after the hardware has processed the
 * full blocks, software-encrypt the trailing partial ("runt") block
 * with CFB (via bpi_cipher_encrypt) when the session direction is
 * encrypt and the cipher length is not block-aligned.
 *
 * Returns the number of bytes handled by hardware, i.e. the cipher
 * data length minus the runt-block length.
 */
static inline uint32_t
qat_bpicipher_postprocess(struct qat_sym_session *ctx,
				struct rte_crypto_op *op)
{
	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	/* Length of the trailing partial block (0 if block-aligned) */
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		/* Locate the runt block at the tail of the cipher region */
		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
						uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
			last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst before post-process:",
				dst, last_block_len);
#endif
		/* CFB-encrypt the runt block in software */
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst after post-process:",
				dst, last_block_len);
#endif
	}
	return sym_op->cipher.data.length - last_block_len;
}
148
149 #ifdef RTE_LIB_SECURITY
150 static inline void
151 qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
152 {
153         struct rte_crypto_sym_op *sym_op = op->sym;
154         uint32_t crc_data_ofs, crc_data_len, crc;
155         uint8_t *crc_data;
156
157         if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
158                         sym_op->auth.data.length != 0) {
159
160                 crc_data_ofs = sym_op->auth.data.offset;
161                 crc_data_len = sym_op->auth.data.length;
162                 crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
163                                 crc_data_ofs);
164
165                 crc = rte_net_crc_calc(crc_data, crc_data_len,
166                                 RTE_NET_CRC32_ETH);
167
168                 if (crc != *(uint32_t *)(crc_data + crc_data_len))
169                         op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
170         }
171 }
172
173 static inline void
174 qat_crc_generate(struct qat_sym_session *ctx,
175                         struct rte_crypto_op *op)
176 {
177         struct rte_crypto_sym_op *sym_op = op->sym;
178         uint32_t *crc, crc_data_len;
179         uint8_t *crc_data;
180
181         if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
182                         sym_op->auth.data.length != 0 &&
183                         sym_op->m_src->nb_segs == 1) {
184
185                 crc_data_len = sym_op->auth.data.length;
186                 crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
187                                 sym_op->auth.data.offset);
188                 crc = (uint32_t *)(crc_data + crc_data_len);
189                 *crc = rte_net_crc_calc(crc_data, crc_data_len,
190                                 RTE_NET_CRC32_ETH);
191         }
192 }
193
194 static inline void
195 qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
196 {
197         struct rte_crypto_op *op;
198         struct qat_sym_session *ctx;
199         uint16_t i;
200
201         for (i = 0; i < nb_ops; i++) {
202                 op = (struct rte_crypto_op *)ops[i];
203
204                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
205                         ctx = (struct qat_sym_session *)
206                                 get_sec_session_private_data(
207                                         op->sym->sec_session);
208
209                         if (ctx == NULL || ctx->bpi_ctx == NULL)
210                                 continue;
211
212                         qat_crc_generate(ctx, op);
213                 }
214         }
215 }
216 #else
217
/* Stub compiled when RTE_LIB_SECURITY is disabled: without security
 * sessions there is no CRC pre-processing to perform.
 */
static inline void
qat_sym_preprocess_requests(void **ops __rte_unused,
				uint16_t nb_ops __rte_unused)
{
}
223 #endif
224
/*
 * Process one firmware response descriptor: recover the originating
 * rte_crypto_op from the descriptor's opaque data, look up its session,
 * translate the firmware status into the op status, and run any
 * required post-processing (BPI runt-block encrypt, DOCSIS CRC verify,
 * SPC-GMAC key scrub). The completed op is returned through @op.
 */
static inline void
qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
{
	struct icp_qat_fw_comn_resp *resp_msg =
			(struct icp_qat_fw_comn_resp *)resp;
	/* The request stored the op pointer in the descriptor opaque data */
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_sym_session *sess;
	uint8_t is_docsis_sec;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comn_resp));
#endif

#ifdef RTE_LIB_SECURITY
	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		/*
		 * Assuming at this point that if it's a security
		 * op, that this is for DOCSIS
		 */
		sess = (struct qat_sym_session *)
				get_sec_session_private_data(
				rx_op->sym->sec_session);
		is_docsis_sec = 1;
	} else
#endif
	{
		sess = (struct qat_sym_session *)
				get_sym_session_private_data(
				rx_op->sym->session,
				qat_sym_driver_id);
		is_docsis_sec = 0;
	}

	/* Any non-OK firmware crypto status is reported as auth failure */
	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
			resp_msg->comn_hdr.comn_status)) {

		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

		if (sess->bpi_ctx) {
			/* Software-encrypt the trailing partial block */
			qat_bpicipher_postprocess(sess, rx_op);
#ifdef RTE_LIB_SECURITY
			if (is_docsis_sec)
				qat_crc_verify(sess, rx_op);
#endif
		}
	}

	if (sess->is_single_pass_gmac) {
		struct qat_sym_op_cookie *cookie =
				(struct qat_sym_op_cookie *) op_cookie;
		/*
		 * Scrub the per-op copy of the key material.
		 * NOTE(review): a plain memset of soon-unused memory can be
		 * elided by the compiler; an explicit-zeroing helper would
		 * be more robust — confirm intent.
		 */
		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
				sess->auth_key_length);
	}

	*op = (void *)rx_op;
}
286
/** Configure the raw data-path context for queue pair @qp_id of @dev,
 *  for the given session type/context. @is_update distinguishes an
 *  update of an existing context from initial configuration.
 */
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);

/** Size in bytes of the driver's raw data-path context for @dev. */
int
qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
295
296 #else
297
/* No-op stub compiled when BUILD_QAT_SYM is not defined. */
static inline void
qat_sym_preprocess_requests(void **ops __rte_unused,
				uint16_t nb_ops __rte_unused)
{
}
303
/* No-op stub compiled when BUILD_QAT_SYM is not defined. */
static inline void
qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
	void *op_cookie __rte_unused)
{
}
309
310 #endif
311 #endif /* _QAT_SYM_H_ */