crypto/qat: unify symmetric functions
drivers/crypto/qat/qat_sym.h (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2022 Intel Corporation
 */

#ifndef _QAT_SYM_H_
#define _QAT_SYM_H_

#include <cryptodev_pmd.h>
#ifdef RTE_LIB_SECURITY
#include <rte_net_crc.h>
#endif

#ifdef BUILD_QAT_SYM
#include <openssl/evp.h>

#include "qat_common.h"
#include "qat_sym_session.h"
#include "qat_crypto.h"
#include "qat_logs.h"

#define BYTE_LENGTH    8
/* BPI is only used for partial blocks of DES and AES,
 * so the AES block length can be assumed as the max length for IV, src and dst.
 */
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ

/** Intel(R) QAT Symmetric Crypto PMD name */
#define CRYPTODEV_NAME_QAT_SYM_PMD      crypto_qat

/* Internal capabilities */
#define QAT_SYM_CAP_MIXED_CRYPTO        (1 << 0)
#define QAT_SYM_CAP_VALID               (1 << 31)

/**
 * Helper macros to add a symmetric crypto capability entry.
 * <n: name> <b: block size> <k: key size> <d: digest size>
 * <a: aad size> <i: iv size>
 */
#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)                                 \
        {                                                               \
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
                {.sym = {                                               \
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,        \
                        {.auth = {                                      \
                                .algo = RTE_CRYPTO_AUTH_##n,            \
                                b, d                                    \
                        }, }                                            \
                }, }                                                    \
        }

#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)                              \
        {                                                               \
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
                {.sym = {                                               \
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,        \
                        {.auth = {                                      \
                                .algo = RTE_CRYPTO_AUTH_##n,            \
                                b, k, d, a, i                           \
                        }, }                                            \
                }, }                                                    \
        }

#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)                              \
        {                                                               \
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
                {.sym = {                                               \
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,        \
                        {.aead = {                                      \
                                .algo = RTE_CRYPTO_AEAD_##n,            \
                                b, k, d, a, i                           \
                        }, }                                            \
                }, }                                                    \
        }

#define QAT_SYM_CIPHER_CAP(n, b, k, i)                                  \
        {                                                               \
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
                {.sym = {                                               \
                        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,      \
                        {.cipher = {                                    \
                                .algo = RTE_CRYPTO_CIPHER_##n,          \
                                b, k, i                                 \
                        }, }                                            \
                }, }                                                    \
        }
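
/*
 * Illustrative sketch only: the per-generation capability tables are expected
 * to build entries from these macros by passing designated-initializer
 * fragments, typically via the CAP_SET()/CAP_RNG() helpers that live alongside
 * the generation-specific code, e.g.:
 *
 *      QAT_SYM_CIPHER_CAP(AES_CBC,
 *              CAP_SET(block_size, 16),
 *              CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
 */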

/*
 * Maximum number of SGL entries
 */
#define QAT_SYM_SGL_MAX_NUMBER  16

/* Maximum data length for single pass GMAC: 2^14-1 */
#define QAT_AES_GMAC_SPC_MAX_SIZE 16383

struct qat_sym_session;

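/* Scatter-gather list in the flat-buffer layout consumed by QAT firmware:
 * an SGL header followed by up to QAT_SYM_SGL_MAX_NUMBER buffer descriptors.
 */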
struct qat_sym_sgl {
        qat_sgl_hdr;
        struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;

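/* Per-request scratch memory: the source/destination SGL tables and their
 * physical addresses referenced by a request, plus optional per-algorithm
 * data such as the Single-Pass AES-GMAC content descriptor.
 */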
struct qat_sym_op_cookie {
        struct qat_sym_sgl qat_sgl_src;
        struct qat_sym_sgl qat_sgl_dst;
        phys_addr_t qat_sgl_src_phys_addr;
        phys_addr_t qat_sgl_dst_phys_addr;
        union {
                /* Used for Single-Pass AES-GMAC only */
                struct {
                        struct icp_qat_hw_cipher_algo_blk cd_cipher
                                        __rte_packed __rte_cache_aligned;
                        phys_addr_t cd_phys_addr;
                } spc_gmac;
        } opt;
};

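/* Per-queue-pair state for the raw data-path API: the session in use and the
 * cached ring positions of requests enqueued/dequeued but not yet committed
 * by the corresponding *_done() calls.
 */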
struct qat_sym_dp_ctx {
        struct qat_sym_session *session;
        uint32_t tail;
        uint32_t head;
        uint16_t cached_enqueue;
        uint16_t cached_dequeue;
};

uint16_t
qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops);

uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops);

int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
                void *op_cookie, enum qat_device_gen qat_dev_gen);

/** Encrypt a single partial block.
 *  Depends on OpenSSL libcrypto.
 *  Uses ECB+XOR to do CFB encryption: same result, more performant.
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
                uint8_t *iv, int ivlen, int srclen,
                void *bpi_ctx)
{
        EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
        int encrypted_ivlen;
        uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
        uint8_t *encr = encrypted_iv;

        /* ECB method: encrypt the IV, then XOR this with plaintext */
        if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
                                                                <= 0)
                goto cipher_encrypt_err;

        for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
                *dst = *src ^ *encr;

        return 0;

cipher_encrypt_err:
        QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
        return -EINVAL;
}
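
/*
 * Illustrative sketch only (not part of this header): bpi_ctx is expected to
 * be a libcrypto ECB context prepared at session setup, along the lines of
 *
 *      EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *      EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL);
 *      EVP_CIPHER_CTX_set_padding(ctx, 0);
 *
 * with EVP_des_ecb() used instead for DES-based DOCSIS BPI.
 */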

static inline uint32_t
qat_bpicipher_postprocess(struct qat_sym_session *ctx,
                                struct rte_crypto_op *op)
{
        int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint8_t last_block_len = block_len > 0 ?
                        sym_op->cipher.data.length % block_len : 0;

        if (last_block_len > 0 &&
                        ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

                /* Encrypt last block */
                uint8_t *last_block, *dst, *iv;
                uint32_t last_block_offset;

                last_block_offset = sym_op->cipher.data.offset +
                                sym_op->cipher.data.length - last_block_len;
                last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
                                uint8_t *, last_block_offset);

                if (unlikely(sym_op->m_dst != NULL))
                        /* out-of-place operation (OOP) */
                        dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
                                                uint8_t *, last_block_offset);
                else
                        dst = last_block;

                if (last_block_len < sym_op->cipher.data.length)
                        /* use previous block ciphertext as IV */
                        iv = dst - block_len;
                else
                        /* runt block, i.e. less than one full block */
                        iv = rte_crypto_op_ctod_offset(op, uint8_t *,
                                        ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
                        last_block, last_block_len);
                if (sym_op->m_dst != NULL)
                        QAT_DP_HEXDUMP_LOG(DEBUG,
                                "BPI: dst before post-process:",
                                dst, last_block_len);
#endif
                bpi_cipher_encrypt(last_block, dst, iv, block_len,
                                last_block_len, ctx->bpi_ctx);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
                                last_block, last_block_len);
                if (sym_op->m_dst != NULL)
                        QAT_DP_HEXDUMP_LOG(DEBUG,
                                "BPI: dst after post-process:",
                                dst, last_block_len);
#endif
        }
        return sym_op->cipher.data.length - last_block_len;
}

#ifdef RTE_LIB_SECURITY
static inline void
qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
{
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint32_t crc_data_ofs, crc_data_len, crc;
        uint8_t *crc_data;

        if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
                        sym_op->auth.data.length != 0) {

                crc_data_ofs = sym_op->auth.data.offset;
                crc_data_len = sym_op->auth.data.length;
                crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
                                crc_data_ofs);

                crc = rte_net_crc_calc(crc_data, crc_data_len,
                                RTE_NET_CRC32_ETH);

                if (crc != *(uint32_t *)(crc_data + crc_data_len))
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        }
}

static inline void
qat_crc_generate(struct qat_sym_session *ctx,
                        struct rte_crypto_op *op)
{
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint32_t *crc, crc_data_len;
        uint8_t *crc_data;

        if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
                        sym_op->auth.data.length != 0 &&
                        sym_op->m_src->nb_segs == 1) {

                crc_data_len = sym_op->auth.data.length;
                crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
                                sym_op->auth.data.offset);
                crc = (uint32_t *)(crc_data + crc_data_len);
                *crc = rte_net_crc_calc(crc_data, crc_data_len,
                                RTE_NET_CRC32_ETH);
        }
}

static inline void
qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
{
        struct rte_crypto_op *op;
        struct qat_sym_session *ctx;
        uint16_t i;

        for (i = 0; i < nb_ops; i++) {
                op = (struct rte_crypto_op *)ops[i];

                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        ctx = (struct qat_sym_session *)
                                get_sec_session_private_data(
                                        op->sym->sec_session);

                        if (ctx == NULL || ctx->bpi_ctx == NULL)
                                continue;

                        qat_crc_generate(ctx, op);
                }
        }
}
#endif

static __rte_always_inline int
qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
                uint64_t *dequeue_err_count __rte_unused)
{
        struct icp_qat_fw_comn_resp *resp_msg =
                        (struct icp_qat_fw_comn_resp *)resp;
        struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
                        (resp_msg->opaque_data);
        struct qat_sym_session *sess;
        uint8_t is_docsis_sec;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
                        sizeof(struct icp_qat_fw_comn_resp));
#endif

#ifdef RTE_LIB_SECURITY
        if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                /*
                 * At this point, assume that any security op
                 * is a DOCSIS op.
                 */
                sess = (struct qat_sym_session *)
                                get_sec_session_private_data(
                                rx_op->sym->sec_session);
                is_docsis_sec = 1;
        } else
#endif
        {
                sess = (struct qat_sym_session *)
                                get_sym_session_private_data(
                                rx_op->sym->session,
                                qat_sym_driver_id);
                is_docsis_sec = 0;
        }

        if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
                        ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
                        resp_msg->comn_hdr.comn_status)) {

                rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        } else {
                rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

                if (sess->bpi_ctx) {
                        qat_bpicipher_postprocess(sess, rx_op);
#ifdef RTE_LIB_SECURITY
                        if (is_docsis_sec)
                                qat_crc_verify(sess, rx_op);
#endif
                }
        }

        if (sess->is_single_pass_gmac) {
                struct qat_sym_op_cookie *cookie =
                                (struct qat_sym_op_cookie *) op_cookie;
                memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
                                sess->auth_key_length);
        }

        *op = (void *)rx_op;

        /*
         * Return 1: the dequeue loop only moves on to the next op
         * if one was ready to return to the API.
         */
        return 1;
}

int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);

int
qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);

void
qat_sym_init_op_cookie(void *cookie);
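
/*
 * Illustrative note: qat_sym_configure_dp_ctx() and qat_sym_get_dp_ctx_size()
 * back the generic raw data-path API. An application would normally reach
 * them through the cryptodev layer, roughly (sketch, assuming a configured
 * device, queue pair and session context):
 *
 *      size_t sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *      struct rte_crypto_raw_dp_ctx *dp_ctx = rte_zmalloc(NULL, sz, 0);
 *      rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp_ctx,
 *                      RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */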

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
static __rte_always_inline void
qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
                struct qat_sym_session *ctx,
                struct rte_crypto_vec *vec, uint32_t vec_len,
                struct rte_crypto_va_iova_ptr *cipher_iv,
                struct rte_crypto_va_iova_ptr *auth_iv,
                struct rte_crypto_va_iova_ptr *aad,
                struct rte_crypto_va_iova_ptr *digest)
{
        uint32_t i;

        QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
                        sizeof(struct icp_qat_fw_la_bulk_req));
        for (i = 0; i < vec_len; i++)
                QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
        if (cipher_iv && ctx->cipher_iv.length > 0)
                QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
                                ctx->cipher_iv.length);
        if (auth_iv && ctx->auth_iv.length > 0)
                QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
                                ctx->auth_iv.length);
        if (aad && ctx->aad_len > 0)
                QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
                                ctx->aad_len);
        if (digest && ctx->digest_length > 0)
                QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
                                ctx->digest_length);
}
#else
static __rte_always_inline void
qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
                struct qat_sym_session *ctx __rte_unused,
                struct rte_crypto_vec *vec __rte_unused,
                uint32_t vec_len __rte_unused,
                struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
                struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
                struct rte_crypto_va_iova_ptr *aad __rte_unused,
                struct rte_crypto_va_iova_ptr *digest __rte_unused)
{
}
#endif

#else

static inline void
qat_sym_preprocess_requests(void **ops __rte_unused,
                                uint16_t nb_ops __rte_unused)
{
}

static inline void
qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
        void *op_cookie __rte_unused)
{
}

#endif /* BUILD_QAT_SYM */
#endif /* _QAT_SYM_H_ */