/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2015-2022 Intel Corporation
 */

#define OPENSSL_API_COMPAT 0x10100000L

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
#include <openssl/evp.h>	/* Needed for bpi runt block processing */

#ifdef RTE_QAT_LIBIPSECMB
#include <intel-ipsec-mb.h>

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security.h>

#include "qat_sym_session.h"

#if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
#include <openssl/provider.h>

extern int qat_ipsec_mb_lib;
/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
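/*
 * Illustrative note (not from the original source): each array above is the
 * standard H(0) word set from FIPS 180-2, serialised big-endian.  For
 * example, SHA-256's first word H0 = 0x6a09e667 becomes the first four
 * bytes of sha256InitialState:
 *
 *	sha256InitialState[0..3] == { 0x6a, 0x09, 0xe6, 0x67 }
 *
 * The hardware consumes these raw states directly when auth mode 0
 * (plain, un-keyed hash) is selected further below.
 */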
qat_sym_cd_cipher_set(struct qat_sym_session *cd,
		const uint8_t *enckey,

qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
		const uint8_t *authkey,
		unsigned int operation);

qat_sym_session_init_common_hdr(struct qat_sym_session *session);

/* Req/cd init functions */
qat_sym_session_finalize(struct qat_sym_session *session)
	qat_sym_session_init_common_hdr(session);

/** Frees a context previously created
 * Depends on openssl libcrypto
 */
bpi_cipher_ctx_free(void *bpi_ctx)
	EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
/** Creates a context in either AES or DES in ECB mode
 * Depends on openssl libcrypto
 */
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
		enum rte_crypto_cipher_operation direction __rte_unused,
		const uint8_t *key, uint16_t key_length, void **ctx)
	const EVP_CIPHER *algo = NULL;

	*ctx = EVP_CIPHER_CTX_new();

	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
		algo = EVP_des_ecb();
	if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
		algo = EVP_aes_128_ecb();
		algo = EVP_aes_256_ecb();

	/* The IV will be ECB-encrypted whether the direction is encrypt or decrypt */
	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
		EVP_CIPHER_CTX_free(*ctx);
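/*
 * Illustrative sketch (not from the original source): DOCSIS BPI handles a
 * trailing "runt" block (shorter than the cipher block size) CFB-style.
 * Roughly, given a 'ctx' created above and the IV of the final partial
 * block ('iv', 'runt_len' and the buffers are hypothetical names here):
 *
 *	uint8_t keystream[16];
 *	int len = 0;
 *	EVP_EncryptUpdate(ctx, keystream, &len, iv, block_len);
 *	for (i = 0; i < runt_len; i++)
 *		out[i] = in[i] ^ keystream[i];
 *
 * The context only ever generates keystream, which is why a single
 * encrypt-direction ECB context serves both encrypt and decrypt.
 */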
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
		struct qat_cryptodev_private *internals)
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		if (capability->sym.cipher.algo == algo)

qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
		struct qat_cryptodev_private *internals)
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
		if (capability->sym.auth.algo == algo)

qat_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;

		bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_sym_session_get_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		/* AES-GCM and AES-CCM work in opposite orders: GCM first
		 * encrypts and then generates the hash, whereas AES-CCM
		 * first generates the hash and then encrypts.  A similar
		 * relation applies to decryption.
		 */
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	if (xform->next == NULL)

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
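/*
 * Illustrative summary (added for clarity) of the xform-to-command mapping
 * implemented above:
 *
 *	CIPHER only                     -> ICP_QAT_FW_LA_CMD_CIPHER
 *	AUTH only                       -> ICP_QAT_FW_LA_CMD_AUTH
 *	AEAD GCM encrypt / CCM decrypt  -> ICP_QAT_FW_LA_CMD_CIPHER_HASH
 *	AEAD GCM decrypt / CCM encrypt  -> ICP_QAT_FW_LA_CMD_HASH_CIPHER
 *	CIPHER chained to AUTH          -> ICP_QAT_FW_LA_CMD_CIPHER_HASH
 *	AUTH chained to CIPHER          -> ICP_QAT_FW_LA_CMD_HASH_CIPHER
 */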
static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return &xform->cipher;

qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		if (qat_dev_gen == QAT_GEN4)
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
				cipher_xform->key.data,
				cipher_xform->key.length,
			QAT_LOG(ERR, "failed to create DES BPI ctx");
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
				cipher_xform->key.data,
				cipher_xform->key.length,
			QAT_LOG(ERR, "failed to create AES BPI ctx");
		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		if (!qat_is_cipher_alg_supported(
				cipher_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
					rte_crypto_cipher_algorithm_strings
						[cipher_xform->algo]);
		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid ZUC cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		if ((cipher_xform->key.length / 2) == ICP_QAT_HW_AES_192_KEY_SZ) {
			QAT_LOG(ERR, "AES-XTS-192 not supported");
		if (qat_sym_validate_aes_key((cipher_xform->key.length / 2),
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u",

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_sym_cd_cipher_set(session,
				cipher_xform->key.data,
				cipher_xform->key.length)) {

	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
qat_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
	void *sess_private_data;

	if (rte_mempool_get(mempool, &sess_private_data)) {
			"Couldn't get object from session mempool");

#if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
	OSSL_PROVIDER *legacy;
	OSSL_PROVIDER *deflt;

	/* Load multiple providers into the default (NULL) library context */
	legacy = OSSL_PROVIDER_load(NULL, "legacy");
	deflt = OSSL_PROVIDER_load(NULL, "default");
		OSSL_PROVIDER_unload(legacy);

	ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
			"Crypto QAT PMD: failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);

	set_sym_session_private_data(sess, dev->driver_id,

#if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
	OSSL_PROVIDER_unload(legacy);
	OSSL_PROVIDER_unload(deflt);
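/*
 * Hedged usage sketch (application side, not part of the driver): with the
 * pre-22.11 session API that this file targets, qat_sym_session_configure()
 * is reached via rte_cryptodev_sym_session_init().  Names such as 'dev_id',
 * 'sess_mp', 'priv_mp' and the AES-CBC key below are illustrative only:
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed");
 */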
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
	struct qat_sym_session *session = session_private;
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
			"Session physical address unknown. Bad memory pool.");

	memset(session, 0, sizeof(*session));
	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	session->dev_id = internals->dev_id;
	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		session->is_single_pass_gmac =
				qat_dev_gen == QAT_GEN3 &&
				xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
				xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
			ret = qat_sym_session_configure_cipher(dev,
			ret = qat_sym_session_configure_auth(dev,
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
			ret = qat_sym_session_configure_auth(dev,
			ret = qat_sym_session_configure_cipher(dev,
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		QAT_LOG(ERR, "Unsupported Service %u",
		QAT_LOG(ERR, "Unsupported Service %u",

	qat_sym_session_finalize(session);

	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
		const struct rte_crypto_aead_xform *aead_xform)
	session->is_single_pass = 1;
	session->is_auth = 1;
	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	/* Chacha20-Poly1305 is a special case that uses QAT CTR mode */
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;

	session->cipher_iv.offset = aead_xform->iv.offset;
	session->cipher_iv.length = aead_xform->iv.length;
	session->aad_len = aead_xform->aad_length;
	session->digest_length = aead_xform->digest_length;

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
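/*
 * Background note (not from the original source): "single pass" (SPC)
 * means the engine performs the AEAD cipher and the authentication in one
 * pass over the data, driven by the plain CIPHER command, instead of
 * chaining separate cipher and auth slices.  That is why qat_cmd is forced
 * to ICP_QAT_FW_LA_CMD_CIPHER above and only the cipher direction plus the
 * digest/AAD lengths are programmed here.
 */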
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
			session->is_iv12B = 1;
		if (qat_dev_gen == QAT_GEN4) {
			session->is_cnt_zero = 1;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
					rte_crypto_auth_algorithm_strings
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		session->is_gmac = 1;
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * The cipher descriptor content needs to be created
			 * first, then the authentication descriptor
			 */
			if (qat_sym_cd_cipher_set(session,
					auth_xform->key.data,
					auth_xform->key.length))
			if (qat_sym_cd_auth_set(session,
					auth_xform->digest_length,
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * The authentication descriptor content needs to be
			 * created first, then the cipher descriptor
			 */
			if (qat_sym_cd_auth_set(session,
					auth_xform->digest_length,
			if (qat_sym_cd_cipher_set(session,
					auth_xform->key.data,
					auth_xform->key.length))
		if (qat_sym_cd_auth_set(session,
				auth_xform->digest_length,
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_cryptodev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store the AEAD IV parameters as the cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = aead_xform->digest_length;

	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;

		if (qat_dev_gen == QAT_GEN4)
		if (session->cipher_iv.length == 0) {
			session->cipher_iv.length = AES_GCM_J0_LEN;
		session->is_iv12B = 1;
		if (qat_dev_gen < QAT_GEN3)
		qat_sym_session_handle_single_pass(session,
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		if (qat_dev_gen == QAT_GEN4)
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
		if (qat_dev_gen == QAT_GEN4)
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		qat_sym_session_handle_single_pass(session,
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u",

	if (session->is_single_pass) {
		if (qat_sym_cd_cipher_set(session,
				aead_xform->key.data, aead_xform->key.length))
	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * The cipher descriptor content needs to be created first,
		 * then the authentication descriptor
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_cd_cipher_set(session,
				aead_xform->key.data,
				aead_xform->key.length))

		if (qat_sym_cd_auth_set(session,
				aead_xform->key.data,
				aead_xform->key.length,
				aead_xform->aad_length,
				aead_xform->digest_length,
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * The authentication descriptor content needs to be
		 * created first, then the cipher descriptor
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_cd_auth_set(session,
				aead_xform->key.data,
				aead_xform->key.length,
				aead_xform->aad_length,
				aead_xform->digest_length,
		if (qat_sym_cd_cipher_set(session,
				aead_xform->key.data,
				aead_xform->key.length))
unsigned int qat_sym_session_get_private_size(
		struct rte_cryptodev *dev __rte_unused)
	return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);

/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
	switch (qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_DES:
		return ICP_QAT_HW_DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_3DES:
		return ICP_QAT_HW_3DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_AES128:
	case ICP_QAT_HW_CIPHER_ALGO_AES192:
	case ICP_QAT_HW_CIPHER_ALGO_AES256:
		return ICP_QAT_HW_AES_BLK_SZ;
	QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
/*
 * Returns size in bytes per hash algo for the state1 size field in cd_ctrl.
 * This is the digest size rounded up to the nearest quadword.
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
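/*
 * Worked example (added for clarity): SHA-1 has a 20-byte digest, so its
 * state1 entry is padded up to the next 8-byte boundary:
 *
 *	QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ, QAT_HW_DEFAULT_ALIGNMENT)
 *		== QAT_HW_ROUND_UP(20, 8) == 24
 *
 * Sizes that are already quadword-aligned (e.g. the 32-byte SHA-256 state)
 * pass through unchanged.
 */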
/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_BLK_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
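/*
 * Background note (not from the original source): the HMAC precomputes
 * below rely on the standard decomposition
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * Only the first compression-function application over the padded blocks
 * (K ^ ipad) and (K ^ opad) depends on the key, so the driver stores those
 * two intermediate states (state1 and state2) in the content descriptor
 * and the hardware resumes from them for every message.
 */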
static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];

#ifdef RTE_QAT_LIBIPSECMB
static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
		const uint8_t *key, uint16_t auth_keylen)
	struct IMB_JOB *job;
	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
	DECLARE_ALIGNED(uint32_t dust[4*15], 16);

	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
		IMB_AES_KEYEXP_128(m, key, expkey, dust);
	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
		IMB_AES_KEYEXP_192(m, key, expkey, dust);
	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
		IMB_AES_KEYEXP_256(m, key, expkey, dust);

	job = IMB_GET_NEXT_JOB(m);
	job->enc_keys = expkey;
	job->key_len_in_bytes = auth_keylen;
	job->msg_len_to_cipher_in_bytes = 16;
	job->iv_len_in_bytes = 0;
	job->cipher_direction = IMB_DIR_ENCRYPT;
	job->cipher_mode = IMB_CIPHER_ECB;
	job->hash_alg = IMB_AUTH_NULL;

	while (IMB_FLUSH_JOB(m) != NULL)

	job = IMB_SUBMIT_JOB(m);
	if (job->status == IMB_STATUS_COMPLETED)

	err = imb_get_errno(m);
		QAT_LOG(ERR, "Error: %s!", imb_get_strerror(err));
partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
		uint8_t *data_in, uint8_t *data_out, IMB_MGR *m)
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;

	/* Initialize to avoid gcc warning */
	memset(digest, 0, sizeof(digest));

	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		IMB_SHA1_ONE_BLOCK(m, data_in, digest);
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		IMB_SHA224_ONE_BLOCK(m, data_in, digest);
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		IMB_SHA256_ONE_BLOCK(m, data_in, digest);
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		IMB_SHA384_ONE_BLOCK(m, data_in, digest);
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		IMB_SHA512_ONE_BLOCK(m, data_in, digest);
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		IMB_MD5_ONE_BLOCK(m, data_in, data_out);
		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
		const uint8_t *auth_key,
		uint16_t auth_keylen,
		uint8_t *p_state_buf,
		uint16_t *p_state_len,
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t in[ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ];

	m = alloc_mb_mgr(0);

	init_mb_mgr_auto(m, NULL);
	memset(in, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
			rte_memcpy(p_state_buf, auth_key, auth_keylen);

			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;

		static uint8_t qat_aes_xcbc_key_seed[
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,

		uint8_t *input = in;
		uint8_t *out = p_state_buf;

		rte_memcpy(input, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
			if (aes_ipsecmb_job(input, out, m, auth_key, auth_keylen)) {
					(i * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);

			input += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;

	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
			(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *out = p_state_buf;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {

		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size < 0) {

	if (auth_keylen > (unsigned int)block_size) {
		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);

	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf, m)) {
		QAT_LOG(ERR, "ipad precompute failed");

	/*
	 * State len is a multiple of 8, so it may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1.
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute_ipsec_mb(hash_alg, opad,
			p_state_buf + *p_state_len, m)) {
		QAT_LOG(ERR, "opad precompute failed");

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
	if (!SHA1_Init(&ctx))
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
	if (!SHA224_Init(&ctx))
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
	if (!SHA256_Init(&ctx))
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
	if (!SHA384_Init(&ctx))
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
	if (!SHA512_Init(&ctx))
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
	if (!MD5_Init(&ctx))
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
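/*
 * Note (added for clarity): the partial_hash_* helpers above deliberately
 * skip the usual length padding and finalisation.  They run exactly one
 * compression-function round (e.g. SHA1_Transform()) over a single
 * block-size input and then copy the raw internal state out of the OpenSSL
 * context.  That raw state, byte-swapped by partial_hash_compute() below,
 * is what the hardware expects as the HMAC state1/state2 precomputes; a
 * finalised digest would be wrong here.
 */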
static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
	/* derived = base << 1 */
	derived[0] = base[0] << 1;
	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ; i++) {
		derived[i] = base[i] << 1;
		derived[i - 1] |= base[i] >> 7;

		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
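/*
 * Illustrative note (not from the original source): this is the GF(2^128)
 * "doubling" step from the CMAC specification (RFC 4493); the conditional
 * XOR with the constant Rb applies only when the shifted-out MSB of 'base'
 * was 1.  The OpenSSL precompute path below derives both CMAC subkeys by
 * chaining it:
 *
 *	uint8_t k0[16], k1[16], k2[16];
 *	aes_cmac_key_derive(k0, k1);	(K1 = double(k0), k0 = AES-128(K, 0^128))
 *	aes_cmac_key_derive(k1, k2);	(K2 = double(K1))
 */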
partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
		uint8_t *data_in, uint8_t *data_out)
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;

	/* Initialize to avoid gcc warning */
	memset(digest, 0, sizeof(digest));

	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (partial_hash_md5(data_in, data_out))
		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
		const uint8_t *auth_key,
		uint16_t auth_keylen,
		uint8_t *p_state_buf,
		uint16_t *p_state_len,
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];

	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];

			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;

			in = rte_zmalloc("AES CMAC K1",
					ICP_QAT_HW_AES_128_KEY_SZ, 16);
				QAT_LOG(ERR, "Failed to alloc memory");

			rte_memcpy(in, AES_CMAC_SEED,
					ICP_QAT_HW_AES_128_KEY_SZ);
			rte_memcpy(p_state_buf, auth_key, auth_keylen);

			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,

			AES_encrypt(in, k0, &enc_key);

			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

			aes_cmac_key_derive(k0, k1);
			aes_cmac_key_derive(k1, k2);

			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;

		static uint8_t qat_aes_xcbc_key_seed[
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,

		uint8_t *out = p_state_buf;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
			QAT_LOG(ERR, "Failed to alloc memory");

		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key,
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);

			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);

	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
			(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *out = p_state_buf;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
			QAT_LOG(ERR, "Failed to alloc memory");

		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,

		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;

	block_size = qat_hash_get_block_size(hash_alg);

	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "ipad precompute failed");

	/*
	 * State len is a multiple of 8, so it may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1.
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "opad precompute failed");

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
qat_sym_session_init_common_hdr(struct qat_sym_session *session)
	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
	uint32_t slice_flags = session->slice_types;

		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->service_cmd_id = session->qat_cmd;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

	switch (proto_flags) {
	case QAT_CRYPTO_PROTO_FLAG_NONE:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_CCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CCM_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_GCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_GCM_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_SNOW_3G_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_ZUC:
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_ZUC_3G_PROTO);

	/* More than one of the following flags can be set at once */
	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

	if (session->is_auth) {
		if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
		} else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);

	if (session->is_iv12B) {
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);

	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
		const uint8_t *cipherkey,
		uint32_t cipherkeylen)
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR streaming ciphers are a special case: decrypt == encrypt,
		 * so the defaults set previously are overridden here.
		 * Chacha20-Poly1305 is a further special case: it is CTR based
		 * but single-pass, so both directions need to be handled.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
			cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
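/*
 * Background note (not from the original source): CTR mode is symmetric
 * because both directions XOR the data with the same keystream:
 *
 *	C[i] = P[i] ^ E_K(counter_i)	(encrypt)
 *	P[i] = C[i] ^ E_K(counter_i)	(decrypt)
 *
 * so the engine can always be programmed for ENCRYPT here, and no decrypt
 * key schedule (key conversion) is needed for the CTR path.
 */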
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
			(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	cipher_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR the key with the KASUMI F8 key modifier, 4 bytes at a time */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		const uint8_t *final_key = cipherkey;

		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +

	if (cdesc->is_single_pass) {
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share the layout of the 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size - cipherkeylen;

		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided, so use K1 = K3 */
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided, so use K1 = K2 = K3 */
			memcpy(cdesc->cd_cur_ptr, cipherkey,
			memcpy(cdesc->cd_cur_ptr + cipherkeylen,
				cipherkey, cipherkeylen);
			memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
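/*
 * Worked example (added for clarity): the hardware always stores a full
 * 24-byte 3DES key K1||K2||K3.  With a two-key 16-byte input
 * (QAT_3DES_KEY_SZ_OPT2) the 8-byte padding is filled from the start of
 * the key, giving K1||K2||K1; with a single 8-byte DES key
 * (QAT_3DES_KEY_SZ_OPT3) the key is copied twice more, giving K1||K1||K1.
 * Any other short key is zero-padded.
 */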
	if (cdesc->is_ucs) {
		/*
		 * These fields sit at the same positions as the
		 * corresponding auth slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
1953 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1954 const uint8_t *authkey,
1955 uint32_t authkeylen,
1956 uint32_t aad_length,
1957 uint32_t digestsize,
1958 unsigned int operation)
1960 struct icp_qat_hw_auth_setup *hash;
1961 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1962 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1963 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1964 void *ptr = &req_tmpl->cd_ctrl;
1965 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1966 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1967 struct icp_qat_fw_la_auth_req_params *auth_param =
1968 (struct icp_qat_fw_la_auth_req_params *)
1969 ((char *)&req_tmpl->serv_specif_rqpars +
1970 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1971 uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1972 uint16_t hash_offset, cd_size;
1973 uint32_t *aad_len = NULL;
1974 uint32_t wordIndex = 0;
1978 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1979 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1980 ICP_QAT_FW_SLICE_AUTH);
1981 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1982 ICP_QAT_FW_SLICE_DRAM_WR);
1983 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1984 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1985 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1986 ICP_QAT_FW_SLICE_AUTH);
1987 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1988 ICP_QAT_FW_SLICE_CIPHER);
1989 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1990 ICP_QAT_FW_SLICE_CIPHER);
1991 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1992 ICP_QAT_FW_SLICE_DRAM_WR);
1993 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1994 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1995 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1999 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
2000 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
2002 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
2005 * Setup the inner hash config
2007 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
2008 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
2009 hash->auth_config.reserved = 0;
2010 hash->auth_config.config =
2011 ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
2012 cdesc->qat_hash_alg, digestsize);
2014 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
2015 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
2016 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
2017 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
2018 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
2019 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
2020 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
2021 || cdesc->is_cnt_zero
2023 hash->auth_counter.counter = 0;
2025 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
2029 hash->auth_counter.counter = rte_bswap32(block_size);
2032 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
2035 * cd_cur_ptr now points at the state1 information.
2037 switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-1 */
			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
					sizeof(sha1InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-1 HMAC */
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
				authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
		}
		if (ret) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
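	/* The remaining SHA digests follow the same pattern: MODE0 copies
	 * the plain initial state, while HMAC precomputes the inner and
	 * outer digest states from the key into state1/state2.
	 */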
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-224 */
			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
					sizeof(sha224InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-224 HMAC */
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
				authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
		}
		if (ret) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-256 */
			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
					sizeof(sha256InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-256 HMAC */
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
				authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
		}
		if (ret) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-384 */
			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
					sizeof(sha384InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-384 HMAC */
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
				authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
		}
		if (ret) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-512 */
			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
					sizeof(sha512InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-512 HMAC */
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
				authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
		}
		if (ret) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

		if (cdesc->aes_cmac)
			memset(cdesc->cd_cur_ptr, 0, state1_size);
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(
				ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
				authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
				&state2_size, cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
				authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
				&state2_size, cdesc->aes_cmac);
		}
		if (ret) {
			if (cdesc->aes_cmac)
				QAT_LOG(ERR, "(CMAC)precompute failed");
			else
				QAT_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(cdesc->qat_hash_alg, authkey,
				authkeylen, cdesc->cd_cur_ptr + state1_size,
				&state2_size, cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
				authkeylen, cdesc->cd_cur_ptr + state1_size,
				&state2_size, cdesc->aes_cmac);
		}
		if (ret) {
			QAT_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes.
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(aad_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
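		/* hash_state_sz is expressed in 8-byte quad-words. */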
		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(aad_length);
		cdesc->aad_len = aad_length;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
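		/* The UIA2 key is handed to the hardware in an ECB-mode
		 * SNOW 3G cipher config block appended after state2, with
		 * zeroed room for the IV following the key.
		 */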
		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
				ICP_QAT_HW_CIPHER_KEY_CONVERT,
				ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
				cdesc->qat_hash_alg, digestsize);
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_ipsec_mb_lib) {
#ifdef RTE_QAT_LIBIPSECMB
			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
#else
			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
			return -EFAULT;
#endif
		} else {
			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
				authkeylen, cdesc->cd_cur_ptr, &state1_size,
				cdesc->aes_cmac);
		}
		if (ret) {
			QAT_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_NULL);
		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
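		/* The AAD buffer must also carry the CCM B0 block and the
		 * encoded AAD length, rounded up to the CCM AAD alignment.
		 */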
		if (aad_length > 0) {
			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
			auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(aad_length,
					ICP_QAT_HW_CCM_AAD_ALIGNMENT);
		} else {
			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
		}
		cdesc->aad_len = aad_length;
		hash->auth_counter.counter = 0;

		hash_cd_ctrl->outer_prefix_sz = digestsize;
		auth_param->hash_state_sz = digestsize;

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
				+ authkeylen);
		/*
		 * The Inner Hash Initial State2 block must contain IK
		 * (Initialisation Key), followed by IK XOR-ed with KM
		 * (Key Modifier): IK||(IK^KM).
		 */
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR the key with the KASUMI F9 key modifier, one 32-bit word at a time */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;
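	/* state2 follows state1 (rounded up to 8 bytes) within the content
	 * descriptor; the firmware offsets and sizes below are expressed
	 * in 8-byte quad-words.
	 */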
	hash_cd_ctrl->inner_state2_sz = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}

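/* Key-size validators: map a key length onto the matching QAT cipher
 * algorithm enum, or return -EINVAL for an unsupported length.
 */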
int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_sym_validate_aes_docsisbpi_key(int key_len,
		enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_KASUMI_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_DES_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_DES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case QAT_3DES_KEY_SZ_OPT1:
	case QAT_3DES_KEY_SZ_OPT2:
	case QAT_3DES_KEY_SZ_OPT3:
		*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

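/* DOCSIS (rte_security) support: only a single AES-DOCSISBPI cipher
 * xform with a 128- or 256-bit key and an AES-block-sized IV is accepted.
 */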
#ifdef RTE_LIB_SECURITY
static int
qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
{
	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
	struct rte_security_docsis_xform *docsis = &conf->docsis;

	/* CRC generate -> Cipher encrypt */
	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    crypto_sym->cipher.algo ==
				RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length ==
				ICP_QAT_HW_AES_128_KEY_SZ ||
		     crypto_sym->cipher.key.length ==
				ICP_QAT_HW_AES_256_KEY_SZ) &&
		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		/* Cipher decrypt -> CRC verify */
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    crypto_sym->cipher.algo ==
				RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length ==
				ICP_QAT_HW_AES_128_KEY_SZ ||
		     crypto_sym->cipher.key.length ==
				ICP_QAT_HW_AES_256_KEY_SZ) &&
		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	}

	return -EINVAL;
}

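/* Translate a validated DOCSIS security configuration into a
 * cipher-only QAT symmetric session.
 */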
static int
qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf, void *session_private)
{
	int ret;
	int qat_cmd_id;
	struct rte_crypto_sym_xform *xform = NULL;
	struct qat_sym_session *session = session_private;

	/* Clear the session */
	memset(session, 0, qat_sym_session_get_private_size(dev));

	ret = qat_sec_session_check_docsis(conf);
	if (ret) {
		QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
		return ret;
	}

	xform = conf->crypto_xform;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR,
			"Session physical address unknown. Bad memory pool.");
		return -EINVAL;
	}

	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	/* Get requested QAT command id - should be cipher */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;

	ret = qat_sym_session_configure_cipher(dev, xform, session);
	if (ret < 0)
		return ret;
	qat_sym_session_finalize(session);

	return 0;
}

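/* Create a DOCSIS security session: the private data object is taken
 * from the caller's mempool and returned to it on any failure.
 */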
static int
qat_security_session_create(void *dev,
		struct rte_security_session_conf *conf,
		struct rte_security_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	struct qat_cryptodev_private *internals = cdev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
	struct qat_sym_session *sym_session = NULL;
	int ret;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
		QAT_LOG(ERR, "Invalid security protocol");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		QAT_LOG(ERR, "Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
			sess_private_data);
	if (ret != 0) {
		QAT_LOG(ERR, "Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);
	sym_session = (struct qat_sym_session *)sess_private_data;
	sym_session->dev_id = internals->dev_id;

	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
			sess_private_data);
}

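/* Destroy a DOCSIS security session: free the BPI cipher context,
 * scrub the private data and return it to its mempool.
 */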
static int
qat_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	void *sess_priv = get_sec_session_private_data(sess);
	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_sym_session_get_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}