/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2022 Intel Corporation
 */

#ifndef _QAT_SYM_H_
#define _QAT_SYM_H_

#include <cryptodev_pmd.h>
#ifdef RTE_LIB_SECURITY
#include <rte_net_crc.h>
#endif

#ifdef BUILD_QAT_SYM
#include <openssl/evp.h>

#include "qat_common.h"
#include "qat_sym_session.h"
#include "qat_crypto.h"
#include "qat_logs.h"
/* BPI is only used for partial blocks of DES and AES,
 * so the AES block length can be assumed to be the maximum
 * length for IV, src and dst.
 */
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ

/** Intel(R) QAT Symmetric Crypto PMD name */
#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat

/* Internal capabilities */
#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
#define QAT_SYM_CAP_VALID		(1 << 31)
/*
 * Helper macros to add a symmetric crypto capability:
 * <n: name> <b: block size> <k: key size> <d: digest size>
 * <a: aad size> <i: iv size>
 */
#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
	{								\
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
		{.sym = {						\
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
			{.auth = {					\
				.algo = RTE_CRYPTO_AUTH_##n,		\
				b, d					\
			}, }						\
		}, }							\
	}

#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
	{								\
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
		{.sym = {						\
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
			{.auth = {					\
				.algo = RTE_CRYPTO_AUTH_##n,		\
				b, k, d, a, i				\
			}, }						\
		}, }							\
	}

#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
	{								\
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
		{.sym = {						\
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
			{.aead = {					\
				.algo = RTE_CRYPTO_AEAD_##n,		\
				b, k, d, a, i				\
			}, }						\
		}, }							\
	}

#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
	{								\
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
		{.sym = {						\
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
			{.cipher = {					\
				.algo = RTE_CRYPTO_CIPHER_##n,		\
				b, k, i					\
			}, }						\
		}, }							\
	}
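/*
 * Illustrative use only (a sketch, not part of the original header; it
 * assumes the QAT driver's CAP_SET()/CAP_RNG() initializer helpers are in
 * scope, since braces would not protect the commas of a bare range
 * initializer inside a macro argument):
 *
 *	static const struct rte_cryptodev_capabilities caps[] = {
 *		QAT_SYM_CIPHER_CAP(AES_CBC,
 *			CAP_SET(block_size, 16),
 *			CAP_RNG(key_size, 16, 32, 8),
 *			CAP_RNG(iv_size, 16, 16, 0)),
 *		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 *	};
 */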
/*
 * Maximum number of SGL entries
 */
#define QAT_SYM_SGL_MAX_NUMBER	16

/* Maximum data length for single pass GMAC: 2^14-1 */
#define QAT_AES_GMAC_SPC_MAX_SIZE	16383

struct qat_sym_session;

struct qat_sym_sgl {
	qat_sgl_hdr;
	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;
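/*
 * Illustrative sketch (assumed field names for the flat-buffer entries and
 * the SGL header from qat_common.h) of how a multi-segment mbuf maps onto
 * this table, one entry per segment, up to QAT_SYM_SGL_MAX_NUMBER:
 *
 *	struct rte_mbuf *seg;
 *	uint32_t i;
 *	for (seg = m, i = 0; seg != NULL && i < QAT_SYM_SGL_MAX_NUMBER;
 *			seg = seg->next, i++) {
 *		sgl->buffers[i].addr = rte_pktmbuf_iova(seg);
 *		sgl->buffers[i].len = rte_pktmbuf_data_len(seg);
 *	}
 *	sgl->num_bufs = i;
 */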
struct qat_sym_op_cookie {
	struct qat_sym_sgl qat_sgl_src;
	struct qat_sym_sgl qat_sgl_dst;
	phys_addr_t qat_sgl_src_phys_addr;
	phys_addr_t qat_sgl_dst_phys_addr;
	union {
		/* Used for Single-Pass AES-GMAC only */
		struct {
			struct icp_qat_hw_cipher_algo_blk cd_cipher
					__rte_packed __rte_cache_aligned;
			phys_addr_t cd_phys_addr;
		} spc_gmac;
	} opt;
};
struct qat_sym_dp_ctx {
	struct qat_sym_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

uint16_t
qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops);

uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops);
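/*
 * Illustrative only (a sketch assuming qp, ops and deq_ops are already set
 * up): these are the burst hooks the PMD registers with rte_cryptodev, so
 * a synchronous submit-and-poll loop over one queue pair looks like:
 *
 *	uint16_t n_enq = qat_sym_enqueue_burst(qp, ops, nb_ops);
 *	uint16_t n_deq = 0;
 *	while (n_deq < n_enq)
 *		n_deq += qat_sym_dequeue_burst(qp,
 *				deq_ops + n_deq, n_enq - n_deq);
 */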
/** Encrypt a single partial block.
 *  Depends on openssl libcrypto.
 *  Uses ECB+XOR to do CFB encryption: same result, more performant.
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
	uint8_t *encr = encrypted_iv;

	/* ECB method: encrypt the IV, then XOR this with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_encrypt_err;

	/* XOR the keystream (encrypted IV) into the partial block */
	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
		*dst = *src ^ *encr;

	return 0;

cipher_encrypt_err:
	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
	return -EINVAL;
}
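/*
 * A minimal sketch (assumptions: AES-128 and a caller-supplied key; in the
 * driver the context is created at session init, not here) of how an ECB
 * libcrypto context suitable for the bpi_ctx argument above can be built:
 *
 *	EVP_CIPHER_CTX *bpi_ctx = EVP_CIPHER_CTX_new();
 *	if (bpi_ctx == NULL ||
 *	    EVP_EncryptInit_ex(bpi_ctx, EVP_aes_128_ecb(),
 *			NULL, key, NULL) != 1 ||
 *	    EVP_CIPHER_CTX_set_padding(bpi_ctx, 0) != 1)
 *		... handle error ...
 */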
static inline uint32_t
qat_bpicipher_postprocess(struct qat_sym_session *ctx,
		struct rte_crypto_op *op)
{
	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
					uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst before post-process:",
				dst, last_block_len);
#endif
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst after post-process:",
				dst, last_block_len);
#endif
	}
	return sym_op->cipher.data.length - last_block_len;
}
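/*
 * Worked example (illustrative): with DES (8-byte blocks) and a 12-byte
 * cipher region, the hardware CBC-encrypts the first 8 bytes and this
 * post-process CFB-encrypts the trailing 4-byte runt, using the first
 * ciphertext block as the CFB IV. The return value (8 here) is the length
 * the hardware handled.
 */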
#ifdef RTE_LIB_SECURITY
static inline void
qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint32_t crc_data_ofs, crc_data_len, crc;
	uint8_t *crc_data;

	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
			sym_op->auth.data.length != 0) {

		crc_data_ofs = sym_op->auth.data.offset;
		crc_data_len = sym_op->auth.data.length;
		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
				crc_data_ofs);

		crc = rte_net_crc_calc(crc_data, crc_data_len,
				RTE_NET_CRC32_ETH);

		if (crc != *(uint32_t *)(crc_data + crc_data_len))
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	}
}
static inline void
qat_crc_generate(struct qat_sym_session *ctx,
		struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint32_t *crc, crc_data_len;
	uint8_t *crc_data;

	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
			sym_op->auth.data.length != 0 &&
			sym_op->m_src->nb_segs == 1) {

		crc_data_len = sym_op->auth.data.length;
		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
				sym_op->auth.data.offset);
		crc = (uint32_t *)(crc_data + crc_data_len);
		*crc = rte_net_crc_calc(crc_data, crc_data_len,
				RTE_NET_CRC32_ETH);
	}
}
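/*
 * Layout note (illustrative): the auth region covers the bytes to be
 * protected and the 32-bit Ethernet CRC is written immediately after it,
 * so a region of length L at offset O gets its CRC at O + L:
 *
 *	| ... | O .. O+L-1 (CRC'd data) | O+L .. O+L+3 (CRC32) | ... |
 *
 * qat_crc_verify() above recomputes the CRC over the same region and
 * compares it against those four trailing bytes.
 */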
static inline void
qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	struct qat_sym_session *ctx;
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		op = (struct rte_crypto_op *)ops[i];

		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			ctx = (struct qat_sym_session *)
				get_sec_session_private_data(
					op->sym->sec_session);

			/* Only DOCSIS (BPI) sessions need a CRC appended */
			if (ctx == NULL || ctx->bpi_ctx == NULL)
				continue;

			qat_crc_generate(ctx, op);
		}
	}
}
#endif
static __rte_always_inline int
qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count __rte_unused)
{
	struct icp_qat_fw_comn_resp *resp_msg =
			(struct icp_qat_fw_comn_resp *)resp;
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_sym_session *sess;
	uint8_t is_docsis_sec;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comn_resp));
#endif

#ifdef RTE_LIB_SECURITY
	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		/*
		 * Assume at this point that any security op
		 * is a DOCSIS op.
		 */
		sess = (struct qat_sym_session *)
				get_sec_session_private_data(
						rx_op->sym->sec_session);
		is_docsis_sec = 1;
	} else
#endif
	{
		sess = (struct qat_sym_session *)
				get_sym_session_private_data(
						rx_op->sym->session,
						qat_sym_driver_id);
		is_docsis_sec = 0;
	}

	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {

		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

		if (sess->bpi_ctx) {
			qat_bpicipher_postprocess(sess, rx_op);
#ifdef RTE_LIB_SECURITY
			if (is_docsis_sec)
				qat_crc_verify(sess, rx_op);
#endif
		}
	}

	if (sess->is_single_pass_gmac) {
		struct qat_sym_op_cookie *cookie =
				(struct qat_sym_op_cookie *) op_cookie;
		/* Zero the GMAC key material cached in the cookie */
		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
			sess->auth_key_length);
	}

	*op = (void *)rx_op;

	/*
	 * Return 1, as the dequeue op only moves on to the next op
	 * if one was ready to return to the API.
	 */
	return 1;
}
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
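/*
 * Illustrative only: this is the PMD hook behind the generic raw
 * data-path API, so an application reaches it through
 * rte_cryptodev_configure_raw_dp_ctx(); a sketch with error handling
 * omitted:
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *dp_ctx = malloc(sz);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp_ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */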
int
qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);

void
qat_sym_init_op_cookie(void *cookie);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
static __rte_always_inline void
qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
		struct qat_sym_session *ctx,
		struct rte_crypto_vec *vec, uint32_t vec_len,
		struct rte_crypto_va_iova_ptr *cipher_iv,
		struct rte_crypto_va_iova_ptr *auth_iv,
		struct rte_crypto_va_iova_ptr *aad,
		struct rte_crypto_va_iova_ptr *digest)
{
	uint32_t i;

	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	for (i = 0; i < vec_len; i++)
		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
	if (cipher_iv && ctx->cipher_iv.length > 0)
		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
				ctx->cipher_iv.length);
	if (auth_iv && ctx->auth_iv.length > 0)
		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
				ctx->auth_iv.length);
	if (aad && ctx->aad_len > 0)
		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
				ctx->aad_len);
	if (digest && ctx->digest_length > 0)
		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
				ctx->digest_length);
}
#else
static __rte_always_inline void
qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
		struct qat_sym_session *ctx __rte_unused,
		struct rte_crypto_vec *vec __rte_unused,
		uint32_t vec_len __rte_unused,
		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
		struct rte_crypto_va_iova_ptr *aad __rte_unused,
		struct rte_crypto_va_iova_ptr *digest __rte_unused)
{
}
#endif /* RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG */

#else /* BUILD_QAT_SYM not defined: provide empty stubs */

static inline void
qat_sym_preprocess_requests(void **ops __rte_unused,
		uint16_t nb_ops __rte_unused)
{
}

static inline void
qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
		void *op_cookie __rte_unused)
{
}

#endif /* BUILD_QAT_SYM */
#endif /* _QAT_SYM_H_ */