1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2 * Copyright(c) 2015-2019 Intel Corporation
5 #include <openssl/sha.h> /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h> /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h> /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h> /* Needed for bpi runt block processing */
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
/* Constant initial hash values (H0..Hn) for the SHA family, stored as raw
 * big-endian bytes. Used as seed state when building HMAC precomputes.
 */
25 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
26 static const uint8_t sha1InitialState[] = {
27 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
28 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};
30 /* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
31 static const uint8_t sha224InitialState[] = {
32 0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
33 0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
34 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};
36 /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
37 static const uint8_t sha256InitialState[] = {
38 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
39 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
40 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};
42 /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
43 static const uint8_t sha384InitialState[] = {
44 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
45 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
46 0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
47 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
48 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
49 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};
51 /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
52 static const uint8_t sha512InitialState[] = {
53 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
54 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
55 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
56 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
57 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
58 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
/* Forward declarations of the local content-descriptor helpers used below.
 * NOTE(review): this listing has gaps (source line numbers jump); the return
 * types and some parameters of these prototypes are elided here — confirm
 * against the full file.
 */
61 qat_sym_cd_cipher_set(struct qat_sym_session *cd,
62 const uint8_t *enckey,
66 qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
67 const uint8_t *authkey,
71 unsigned int operation);
73 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
75 /* Req/cd init functions */
/* Finalize a configured session by filling in the common FW request header. */
78 qat_sym_session_finalize(struct qat_sym_session *session)
80 qat_sym_session_init_common_hdr(session);
83 /** Frees a context previously created
84 * Depends on openssl libcrypto
/* Releases the OpenSSL EVP cipher context held in session->bpi_ctx.
 * Safe-guarding against NULL (if any) is in the elided lines of this listing.
 */
87 bpi_cipher_ctx_free(void *bpi_ctx)
90 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
93 /** Creates a context in either AES or DES in ECB mode
94 * Depends on openssl libcrypto
/* Allocates an EVP context and selects the ECB variant matching the DOCSIS
 * BPI algorithm: DES-ECB for DES_DOCSISBPI, AES-128/256-ECB for AES keys.
 * The context is always initialised for encryption because BPI runt-block
 * processing ECB-encrypts the IV regardless of the data direction.
 * On EVP failure the context is freed (error return path elided in listing).
 */
97 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
98 enum rte_crypto_cipher_operation direction __rte_unused,
99 const uint8_t *key, uint16_t key_length, void **ctx)
101 const EVP_CIPHER *algo = NULL;
103 *ctx = EVP_CIPHER_CTX_new();
110 if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
111 algo = EVP_des_ecb();
113 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
114 algo = EVP_aes_128_ecb();
116 algo = EVP_aes_256_ecb();
118 /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
119 if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
128 EVP_CIPHER_CTX_free(*ctx);
/* Scans the device capability table (terminated by OP_TYPE_UNDEFINED) and
 * reports whether the given cipher algorithm is advertised by this device.
 */
133 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
134 struct qat_sym_dev_private *internals)
137 const struct rte_cryptodev_capabilities *capability;
139 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
140 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
141 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
/* skip non-symmetric and non-cipher entries; match on algo */
144 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
147 if (capability->sym.cipher.algo == algo)
/* Same capability-table walk as qat_is_cipher_alg_supported(), but matching
 * an authentication (hash) algorithm instead of a cipher.
 */
154 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
155 struct qat_sym_dev_private *internals)
158 const struct rte_cryptodev_capabilities *capability;
160 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
161 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
162 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
165 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
168 if (capability->sym.auth.algo == algo)
/* cryptodev .sym_session_clear callback: tears down the per-driver private
 * session data. Frees the OpenSSL BPI context (if one was created), zeroes
 * the private area so no key material lingers, detaches it from the generic
 * session and returns the object to its mempool.
 */
175 qat_sym_session_clear(struct rte_cryptodev *dev,
176 struct rte_cryptodev_sym_session *sess)
178 uint8_t index = dev->driver_id;
179 void *sess_priv = get_sym_session_private_data(sess, index);
180 struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
184 bpi_cipher_ctx_free(s->bpi_ctx);
/* wipe keys/state before returning the buffer to the pool */
185 memset(s, 0, qat_sym_session_get_private_size(dev));
186 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
188 set_sym_session_private_data(sess, index, NULL);
189 rte_mempool_put(sess_mp, sess_priv);
/* Maps an rte_crypto xform chain onto a QAT firmware LA command id:
 * lone cipher -> CIPHER, lone auth -> AUTH, AEAD -> order depends on
 * algorithm and direction (see comment below), cipher+auth chain ->
 * CIPHER_HASH, auth+cipher chain -> HASH_CIPHER. Error return for an
 * unsupported chain is in the elided lines of this listing.
 */
194 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
197 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
198 return ICP_QAT_FW_LA_CMD_CIPHER;
200 /* Authentication Only */
201 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
202 return ICP_QAT_FW_LA_CMD_AUTH;
205 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
206 /* AES-GCM and AES-CCM works with different direction
207 * GCM first encrypts and generate hash where AES-CCM
208 * first generate hash and encrypts. Similar relation
209 * applies to decryption.
211 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
212 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
213 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
215 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
217 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
218 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
220 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
223 if (xform->next == NULL)
226 /* Cipher then Authenticate */
227 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
228 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
229 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
231 /* Authenticate then Cipher */
232 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
233 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
/* Returns the auth portion of an xform chain (the handling of chained
 * xforms is in the elided lines of this listing).
 */
239 static struct rte_crypto_auth_xform *
240 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
243 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
/* Returns the cipher portion of an xform chain (the handling of chained
 * xforms is in the elided lines of this listing).
 */
252 static struct rte_crypto_cipher_xform *
253 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
256 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
257 return &xform->cipher;
/* Translates the cipher xform into QAT session state: records the IV
 * offset/length, validates the key size per algorithm, selects the QAT
 * cipher algorithm and mode (CBC/CTR/ECB/F8/XTS), and for the DOCSIS BPI
 * algorithms additionally creates an OpenSSL ECB context for runt-block
 * processing. Finishes by setting the direction and building the cipher
 * content descriptor via qat_sym_cd_cipher_set(); the error path frees any
 * BPI context that was created.
 * NOTE(review): break statements, error returns and closing braces are
 * elided in this listing (source line numbers jump) — verify the full file.
 */
266 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
267 struct rte_crypto_sym_xform *xform,
268 struct qat_sym_session *session)
270 struct qat_sym_dev_private *internals = dev->data->dev_private;
271 struct rte_crypto_cipher_xform *cipher_xform = NULL;
272 enum qat_device_gen qat_dev_gen =
273 internals->qat_dev->qat_dev_gen;
276 /* Get cipher xform from crypto xform chain */
277 cipher_xform = qat_get_cipher_xform(xform);
279 session->cipher_iv.offset = cipher_xform->iv.offset;
280 session->cipher_iv.length = cipher_xform->iv.length;
282 switch (cipher_xform->algo) {
283 case RTE_CRYPTO_CIPHER_AES_CBC:
284 if (qat_sym_validate_aes_key(cipher_xform->key.length,
285 &session->qat_cipher_alg) != 0) {
286 QAT_LOG(ERR, "Invalid AES cipher key size");
290 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
292 case RTE_CRYPTO_CIPHER_AES_CTR:
293 if (qat_sym_validate_aes_key(cipher_xform->key.length,
294 &session->qat_cipher_alg) != 0) {
295 QAT_LOG(ERR, "Invalid AES cipher key size");
299 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
300 if (qat_dev_gen == QAT_GEN4)
303 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
304 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
305 &session->qat_cipher_alg) != 0) {
306 QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
310 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
312 case RTE_CRYPTO_CIPHER_NULL:
313 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
314 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
316 case RTE_CRYPTO_CIPHER_KASUMI_F8:
317 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
318 &session->qat_cipher_alg) != 0) {
319 QAT_LOG(ERR, "Invalid KASUMI cipher key size");
323 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
325 case RTE_CRYPTO_CIPHER_3DES_CBC:
326 if (qat_sym_validate_3des_key(cipher_xform->key.length,
327 &session->qat_cipher_alg) != 0) {
328 QAT_LOG(ERR, "Invalid 3DES cipher key size");
332 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
334 case RTE_CRYPTO_CIPHER_DES_CBC:
335 if (qat_sym_validate_des_key(cipher_xform->key.length,
336 &session->qat_cipher_alg) != 0) {
337 QAT_LOG(ERR, "Invalid DES cipher key size");
341 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
343 case RTE_CRYPTO_CIPHER_3DES_CTR:
344 if (qat_sym_validate_3des_key(cipher_xform->key.length,
345 &session->qat_cipher_alg) != 0) {
346 QAT_LOG(ERR, "Invalid 3DES cipher key size");
350 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
/* DOCSIS BPI variants need an OpenSSL ECB context for runt blocks */
352 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
353 ret = bpi_cipher_ctx_init(
356 cipher_xform->key.data,
357 cipher_xform->key.length,
360 QAT_LOG(ERR, "failed to create DES BPI ctx");
363 if (qat_sym_validate_des_key(cipher_xform->key.length,
364 &session->qat_cipher_alg) != 0) {
365 QAT_LOG(ERR, "Invalid DES cipher key size");
369 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
371 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
372 ret = bpi_cipher_ctx_init(
375 cipher_xform->key.data,
376 cipher_xform->key.length,
379 QAT_LOG(ERR, "failed to create AES BPI ctx");
382 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
383 &session->qat_cipher_alg) != 0) {
384 QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
388 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
/* ZUC support depends on the device's advertised capabilities */
390 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
391 if (!qat_is_cipher_alg_supported(
392 cipher_xform->algo, internals)) {
393 QAT_LOG(ERR, "%s not supported on this device",
394 rte_crypto_cipher_algorithm_strings
395 [cipher_xform->algo]);
399 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
400 &session->qat_cipher_alg) != 0) {
401 QAT_LOG(ERR, "Invalid ZUC cipher key size");
405 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
/* XTS keys are two concatenated AES keys; validate half the length */
407 case RTE_CRYPTO_CIPHER_AES_XTS:
408 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
409 QAT_LOG(ERR, "AES-XTS-192 not supported");
413 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
414 &session->qat_cipher_alg) != 0) {
415 QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
419 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
421 case RTE_CRYPTO_CIPHER_3DES_ECB:
422 case RTE_CRYPTO_CIPHER_AES_ECB:
423 case RTE_CRYPTO_CIPHER_AES_F8:
424 case RTE_CRYPTO_CIPHER_ARC4:
425 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
430 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
436 if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
437 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
439 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
441 if (qat_sym_cd_cipher_set(session,
442 cipher_xform->key.data,
443 cipher_xform->key.length)) {
/* error path: release the BPI context created above, if any */
451 if (session->bpi_ctx) {
452 bpi_cipher_ctx_free(session->bpi_ctx);
453 session->bpi_ctx = NULL;
/* cryptodev .sym_session_configure callback: takes a private-data object
 * from the session mempool, configures it via
 * qat_sym_session_set_parameters(), and attaches it to the generic session.
 * On parameter failure the object is returned to the mempool.
 */
459 qat_sym_session_configure(struct rte_cryptodev *dev,
460 struct rte_crypto_sym_xform *xform,
461 struct rte_cryptodev_sym_session *sess,
462 struct rte_mempool *mempool)
464 void *sess_private_data;
467 if (rte_mempool_get(mempool, &sess_private_data)) {
469 "Couldn't get object from session mempool");
473 ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
476 "Crypto QAT PMD: failed to configure session parameters");
478 /* Return session to mempool */
479 rte_mempool_put(mempool, sess_private_data);
483 set_sym_session_private_data(sess, dev->driver_id,
/* For mixed hash/cipher sessions: turns on the "extended protocol flags"
 * bit in firmware request LW1, ORs the caller's hash flag into the
 * cd_ctrl hash_flags (LW28), and sets the LW1 proto flags consistently
 * with the configured cipher algorithm (SNOW3G proto, ZUC proto, or none).
 */
490 qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
493 struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
494 struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
495 (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
496 session->fw_req.cd_ctrl.content_desc_ctrl_lw;
498 /* Set the Use Extended Protocol Flags bit in LW 1 */
499 QAT_FIELD_SET(header->comn_req_flags,
500 QAT_COMN_EXT_FLAGS_USED,
501 QAT_COMN_EXT_FLAGS_BITPOS,
502 QAT_COMN_EXT_FLAGS_MASK);
504 /* Set Hash Flags in LW 28 */
505 cd_ctrl->hash_flags |= hash_flag;
507 /* Set proto flags in LW 1 */
508 switch (session->qat_cipher_alg) {
509 case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
510 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
511 ICP_QAT_FW_LA_SNOW_3G_PROTO);
512 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
513 header->serv_specif_flags, 0);
515 case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
516 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
517 ICP_QAT_FW_LA_NO_PROTO);
518 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
519 header->serv_specif_flags,
520 ICP_QAT_FW_LA_ZUC_3G_PROTO);
/* any other cipher: clear both proto selections */
523 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
524 ICP_QAT_FW_LA_NO_PROTO);
525 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
526 header->serv_specif_flags, 0);
/* Special-cases "mixed" wireless algorithm combinations (e.g. ZUC auth with
 * a non-ZUC cipher, SNOW3G auth with a non-SNOW3G cipher, or CMAC/NULL auth
 * with a SNOW3G/ZUC cipher): bumps the minimum required device generation
 * (GEN2 if the device advertises mixed-crypto capability, else GEN3) and
 * programs the extended hash flags accordingly.
 */
532 qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
533 struct qat_sym_session *session)
535 const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
536 enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
537 QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
539 if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
540 session->qat_cipher_alg !=
541 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
542 session->min_qat_dev_gen = min_dev_gen;
543 qat_sym_session_set_ext_hash_flags(session,
544 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
545 } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
546 session->qat_cipher_alg !=
547 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
548 session->min_qat_dev_gen = min_dev_gen;
549 qat_sym_session_set_ext_hash_flags(session,
550 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
551 } else if ((session->aes_cmac ||
552 session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
553 (session->qat_cipher_alg ==
554 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
555 session->qat_cipher_alg ==
556 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
557 session->min_qat_dev_gen = min_dev_gen;
558 qat_sym_session_set_ext_hash_flags(session, 0);
/* Core session setup: validates the session's IOVA, zeroes and seeds the
 * session struct (cd_paddr, default min gen, proto flags), maps the xform
 * chain to a firmware command id, then dispatches to the cipher / auth /
 * AEAD configure helpers in the order the command requires. Ends by
 * finalizing the common header and applying mixed-algorithm handling.
 * NOTE(review): several error returns / break statements are elided in this
 * listing (source line numbers jump).
 */
563 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
564 struct rte_crypto_sym_xform *xform, void *session_private)
566 struct qat_sym_session *session = session_private;
567 struct qat_sym_dev_private *internals = dev->data->dev_private;
568 enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
571 int handle_mixed = 0;
573 /* Verify the session physical address is known */
574 rte_iova_t session_paddr = rte_mempool_virt2iova(session);
575 if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
577 "Session physical address unknown. Bad memory pool.");
581 memset(session, 0, sizeof(*session));
582 /* Set context descriptor physical address */
583 session->cd_paddr = session_paddr +
584 offsetof(struct qat_sym_session, cd);
586 session->min_qat_dev_gen = QAT_GEN1;
587 session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
590 /* Get requested QAT command id */
591 qat_cmd_id = qat_get_cmd_id(xform);
592 if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
593 QAT_LOG(ERR, "Unsupported xform chain requested");
596 session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
597 switch (session->qat_cmd) {
598 case ICP_QAT_FW_LA_CMD_CIPHER:
599 ret = qat_sym_session_configure_cipher(dev, xform, session);
603 case ICP_QAT_FW_LA_CMD_AUTH:
604 ret = qat_sym_session_configure_auth(dev, xform, session);
/* GEN3 can run GMAC single-pass when the IV is the SPC size */
607 session->is_single_pass_gmac =
608 qat_dev_gen == QAT_GEN3 &&
609 xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
610 xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
612 case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
613 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
614 ret = qat_sym_session_configure_aead(dev, xform,
/* chained case: cipher first, then auth */
619 ret = qat_sym_session_configure_cipher(dev,
623 ret = qat_sym_session_configure_auth(dev,
630 case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
631 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
632 ret = qat_sym_session_configure_aead(dev, xform,
/* chained case: auth first, then cipher */
637 ret = qat_sym_session_configure_auth(dev,
641 ret = qat_sym_session_configure_cipher(dev,
648 case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
649 case ICP_QAT_FW_LA_CMD_TRNG_TEST:
650 case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
651 case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
652 case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
653 case ICP_QAT_FW_LA_CMD_MGF1:
654 case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
655 case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
656 case ICP_QAT_FW_LA_CMD_DELIMITER:
657 QAT_LOG(ERR, "Unsupported Service %u",
661 QAT_LOG(ERR, "Unsupported Service %u",
665 qat_sym_session_finalize(session);
667 /* Special handling of mixed hash+cipher algorithms */
668 qat_sym_session_handle_mixed(dev, session);
/* Switches an AEAD session into single-pass mode (GEN3+): a single CIPHER
 * command performs both cipher and auth. GCM uses the HW AEAD mode, while
 * Chacha20-Poly1305 goes through CTR mode. AEAD IV/AAD/digest parameters
 * are copied into the session and the direction mapped to
 * encrypt+generate / decrypt+verify.
 */
675 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
676 const struct rte_crypto_aead_xform *aead_xform)
678 session->is_single_pass = 1;
679 session->is_auth = 1;
680 session->min_qat_dev_gen = QAT_GEN3;
681 session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
682 /* Chacha-Poly is special case that use QAT CTR mode */
683 if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
684 session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
686 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
688 session->cipher_iv.offset = aead_xform->iv.offset;
689 session->cipher_iv.length = aead_xform->iv.length;
690 session->aad_len = aead_xform->aad_length;
691 session->digest_length = aead_xform->digest_length;
693 if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
694 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
695 session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
697 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
698 session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
/* Translates the auth xform into QAT session state: picks the QAT hash
 * algorithm and auth mode (MODE0 for plain hashes, MODE1 otherwise),
 * handles special cases (CMAC reuses the XCBC engine with aes_cmac set,
 * GMAC is implemented as a GCM cipher+hash, ZUC requires a capability
 * check), then for GMAC builds cipher and auth content descriptors in the
 * order dictated by generate vs verify; plain auth builds only the auth CD.
 * NOTE(review): break statements and error returns are elided in this
 * listing (source line numbers jump) — verify against the full file.
 */
705 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
706 struct rte_crypto_sym_xform *xform,
707 struct qat_sym_session *session)
709 struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
710 struct qat_sym_dev_private *internals = dev->data->dev_private;
711 const uint8_t *key_data = auth_xform->key.data;
712 uint8_t key_length = auth_xform->key.length;
714 session->aes_cmac = 0;
715 session->auth_key_length = auth_xform->key.length;
716 session->auth_iv.offset = auth_xform->iv.offset;
717 session->auth_iv.length = auth_xform->iv.length;
718 session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
719 session->is_auth = 1;
721 switch (auth_xform->algo) {
722 case RTE_CRYPTO_AUTH_SHA1:
723 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
724 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
726 case RTE_CRYPTO_AUTH_SHA224:
727 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
728 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
730 case RTE_CRYPTO_AUTH_SHA256:
731 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
732 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
734 case RTE_CRYPTO_AUTH_SHA384:
735 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
736 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
738 case RTE_CRYPTO_AUTH_SHA512:
739 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
740 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
742 case RTE_CRYPTO_AUTH_SHA1_HMAC:
743 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
745 case RTE_CRYPTO_AUTH_SHA224_HMAC:
746 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
748 case RTE_CRYPTO_AUTH_SHA256_HMAC:
749 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
751 case RTE_CRYPTO_AUTH_SHA384_HMAC:
752 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
754 case RTE_CRYPTO_AUTH_SHA512_HMAC:
755 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
757 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
758 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
/* CMAC rides on the XCBC engine, distinguished by the aes_cmac flag */
760 case RTE_CRYPTO_AUTH_AES_CMAC:
761 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
762 session->aes_cmac = 1;
/* GMAC is implemented as GCM (Galois hash over CTR cipher) */
764 case RTE_CRYPTO_AUTH_AES_GMAC:
765 if (qat_sym_validate_aes_key(auth_xform->key.length,
766 &session->qat_cipher_alg) != 0) {
767 QAT_LOG(ERR, "Invalid AES key size");
770 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
771 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
772 if (session->auth_iv.length == 0)
773 session->auth_iv.length = AES_GCM_J0_LEN;
775 session->is_iv12B = 1;
777 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
778 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
780 case RTE_CRYPTO_AUTH_MD5_HMAC:
781 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
783 case RTE_CRYPTO_AUTH_NULL:
784 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
786 case RTE_CRYPTO_AUTH_KASUMI_F9:
787 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
789 case RTE_CRYPTO_AUTH_ZUC_EIA3:
790 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
791 QAT_LOG(ERR, "%s not supported on this device",
792 rte_crypto_auth_algorithm_strings
796 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
798 case RTE_CRYPTO_AUTH_MD5:
799 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
800 QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
804 QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
/* GMAC: build both cipher and auth CDs, order depends on generate/verify */
809 if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
810 session->is_gmac = 1;
811 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
812 session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
813 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
815 * It needs to create cipher desc content first,
816 * then authentication
818 if (qat_sym_cd_cipher_set(session,
819 auth_xform->key.data,
820 auth_xform->key.length))
823 if (qat_sym_cd_auth_set(session,
827 auth_xform->digest_length,
831 session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
832 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
834 * It needs to create authentication desc content first,
838 if (qat_sym_cd_auth_set(session,
842 auth_xform->digest_length,
846 if (qat_sym_cd_cipher_set(session,
847 auth_xform->key.data,
848 auth_xform->key.length))
/* non-GMAC: only the auth content descriptor is needed */
852 if (qat_sym_cd_auth_set(session,
856 auth_xform->digest_length,
861 session->digest_length = auth_xform->digest_length;
/* Translates the AEAD xform into QAT session state. AEAD IV parameters are
 * stored in the cipher_iv slot. Per algorithm: GCM validates the AES key,
 * selects CTR mode + Galois-128 hash, and on GEN3+ enables single-pass;
 * CCM validates the AES key and uses CTR mode + AES-CBC-MAC hash;
 * Chacha20-Poly1305 requires the exact key size and always goes single-pass.
 * Then the cipher/auth content descriptors are built: single-pass needs only
 * the cipher CD; otherwise the CD build order follows the same
 * encrypt-vs-decrypt / GCM-vs-CCM relationship described in qat_get_cmd_id().
 * NOTE(review): break statements and error returns are elided in this
 * listing (source line numbers jump) — verify against the full file.
 */
866 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
867 struct rte_crypto_sym_xform *xform,
868 struct qat_sym_session *session)
870 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
871 enum rte_crypto_auth_operation crypto_operation;
872 struct qat_sym_dev_private *internals =
873 dev->data->dev_private;
874 enum qat_device_gen qat_dev_gen =
875 internals->qat_dev->qat_dev_gen;
878 * Store AEAD IV parameters as cipher IV,
879 * to avoid unnecessary memory usage
881 session->cipher_iv.offset = xform->aead.iv.offset;
882 session->cipher_iv.length = xform->aead.iv.length;
884 session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
885 session->is_auth = 1;
886 session->digest_length = aead_xform->digest_length;
888 session->is_single_pass = 0;
889 switch (aead_xform->algo) {
890 case RTE_CRYPTO_AEAD_AES_GCM:
891 if (qat_sym_validate_aes_key(aead_xform->key.length,
892 &session->qat_cipher_alg) != 0) {
893 QAT_LOG(ERR, "Invalid AES key size");
896 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
897 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
899 if (qat_dev_gen == QAT_GEN4)
901 if (session->cipher_iv.length == 0) {
902 session->cipher_iv.length = AES_GCM_J0_LEN;
905 session->is_iv12B = 1;
906 if (qat_dev_gen < QAT_GEN3)
908 qat_sym_session_handle_single_pass(session,
911 case RTE_CRYPTO_AEAD_AES_CCM:
912 if (qat_sym_validate_aes_key(aead_xform->key.length,
913 &session->qat_cipher_alg) != 0) {
914 QAT_LOG(ERR, "Invalid AES key size");
917 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
918 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
919 if (qat_dev_gen == QAT_GEN4)
922 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
923 if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
925 session->qat_cipher_alg =
926 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
927 qat_sym_session_handle_single_pass(session,
931 QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
/* single-pass: firmware derives auth state, only a cipher CD is needed */
936 if (session->is_single_pass) {
937 if (qat_sym_cd_cipher_set(session,
938 aead_xform->key.data, aead_xform->key.length))
940 } else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
941 aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
942 (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
943 aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
944 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
946 * It needs to create cipher desc content first,
947 * then authentication
949 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
950 RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
952 if (qat_sym_cd_cipher_set(session,
953 aead_xform->key.data,
954 aead_xform->key.length))
957 if (qat_sym_cd_auth_set(session,
958 aead_xform->key.data,
959 aead_xform->key.length,
960 aead_xform->aad_length,
961 aead_xform->digest_length,
/* opposite direction: auth CD first, then cipher CD */
965 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
967 * It needs to create authentication desc content first,
971 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
972 RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
974 if (qat_sym_cd_auth_set(session,
975 aead_xform->key.data,
976 aead_xform->key.length,
977 aead_xform->aad_length,
978 aead_xform->digest_length,
982 if (qat_sym_cd_cipher_set(session,
983 aead_xform->key.data,
984 aead_xform->key.length))
/* cryptodev .sym_session_get_size callback: size of the per-driver private
 * session data, rounded up to an 8-byte multiple.
 */
991 unsigned int qat_sym_session_get_private_size(
992 struct rte_cryptodev *dev __rte_unused)
994 return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
997 /* returns block size in bytes per cipher algo */
/* Maps a QAT HW cipher algo id to its block size; logs and returns an
 * error for unrecognised values (error return elided in this listing).
 */
998 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1000 switch (qat_cipher_alg) {
1001 case ICP_QAT_HW_CIPHER_ALGO_DES:
1002 return ICP_QAT_HW_DES_BLK_SZ;
1003 case ICP_QAT_HW_CIPHER_ALGO_3DES:
1004 return ICP_QAT_HW_3DES_BLK_SZ;
1005 case ICP_QAT_HW_CIPHER_ALGO_AES128:
1006 case ICP_QAT_HW_CIPHER_ALGO_AES192:
1007 case ICP_QAT_HW_CIPHER_ALGO_AES256:
1008 return ICP_QAT_HW_AES_BLK_SZ;
1010 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1017 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1018 * This is digest size rounded up to nearest quadword
/* DELIMITER is used as a "max over all algorithms" query (SHA-512 state). */
1020 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1022 switch (qat_hash_alg) {
1023 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1024 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1025 QAT_HW_DEFAULT_ALIGNMENT);
1026 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1027 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1028 QAT_HW_DEFAULT_ALIGNMENT);
1029 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1030 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1031 QAT_HW_DEFAULT_ALIGNMENT);
1032 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1033 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1034 QAT_HW_DEFAULT_ALIGNMENT);
1035 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1036 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1037 QAT_HW_DEFAULT_ALIGNMENT);
1038 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1039 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1040 QAT_HW_DEFAULT_ALIGNMENT);
1041 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1042 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1043 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1044 QAT_HW_DEFAULT_ALIGNMENT);
1045 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1046 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1047 QAT_HW_DEFAULT_ALIGNMENT);
1048 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1049 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1050 QAT_HW_DEFAULT_ALIGNMENT);
1051 case ICP_QAT_HW_AUTH_ALGO_MD5:
1052 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1053 QAT_HW_DEFAULT_ALIGNMENT);
1054 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1055 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1056 QAT_HW_DEFAULT_ALIGNMENT);
1057 case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1058 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1059 QAT_HW_DEFAULT_ALIGNMENT);
1060 case ICP_QAT_HW_AUTH_ALGO_NULL:
1061 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1062 QAT_HW_DEFAULT_ALIGNMENT);
1063 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1064 /* return maximum state1 size in this case */
1065 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1066 QAT_HW_DEFAULT_ALIGNMENT);
1068 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1074 /* returns digest size in bytes per hash algo */
/* DELIMITER is used as a "max over all algorithms" query (SHA-512 size). */
1075 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1077 switch (qat_hash_alg) {
1078 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1079 return ICP_QAT_HW_SHA1_STATE1_SZ;
1080 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1081 return ICP_QAT_HW_SHA224_STATE1_SZ;
1082 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1083 return ICP_QAT_HW_SHA256_STATE1_SZ;
1084 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1085 return ICP_QAT_HW_SHA384_STATE1_SZ;
1086 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1087 return ICP_QAT_HW_SHA512_STATE1_SZ;
1088 case ICP_QAT_HW_AUTH_ALGO_MD5:
1089 return ICP_QAT_HW_MD5_STATE1_SZ;
1090 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1091 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1092 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1093 /* return maximum digest size in this case */
1094 return ICP_QAT_HW_SHA512_STATE1_SZ;
1096 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1102 /* returns block size in byes per hash algo */
/* Input block sizes come from OpenSSL's *_CBLOCK constants for the MD/SHA
 * family; DELIMITER again queries the maximum (SHA-512's block size).
 */
1103 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1105 switch (qat_hash_alg) {
1106 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1108 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1109 return SHA256_CBLOCK;
1110 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1111 return SHA256_CBLOCK;
1112 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1113 return SHA512_CBLOCK;
1114 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1115 return SHA512_CBLOCK;
1116 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1118 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1119 return ICP_QAT_HW_AES_BLK_SZ;
1120 case ICP_QAT_HW_AUTH_ALGO_MD5:
1122 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1123 /* return maximum block size in this case */
1124 return SHA512_CBLOCK;
1126 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
/* Runs a single SHA-1 compression round over one input block and copies the
 * raw (non-finalized) internal state out — used to precompute HMAC
 * ipad/opad states for the hardware.
 */
1132 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1136 if (!SHA1_Init(&ctx))
1138 SHA1_Transform(&ctx, data_in);
1139 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
/* SHA-224 partial hash: SHA-224 init, one SHA-256 compression round, raw
 * state copied out (SHA-224 shares the SHA-256 transform/state width).
 */
1143 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1147 if (!SHA224_Init(&ctx))
1149 SHA256_Transform(&ctx, data_in);
1150 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
/* SHA-256 partial hash: one compression round, raw internal state out. */
1154 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1158 if (!SHA256_Init(&ctx))
1160 SHA256_Transform(&ctx, data_in);
1161 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
/* SHA-384 partial hash: SHA-384 init, one SHA-512 compression round, raw
 * state copied out (SHA-384 shares the SHA-512 transform/state width).
 */
1165 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1169 if (!SHA384_Init(&ctx))
1171 SHA512_Transform(&ctx, data_in);
1172 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
/* SHA-512 partial hash: one compression round, raw internal state out. */
1176 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1180 if (!SHA512_Init(&ctx))
1182 SHA512_Transform(&ctx, data_in);
1183 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
/* MD5 partial hash: one compression round, raw internal state out. Note
 * the callers use this result without byte-swapping (MD5 state is
 * little-endian, unlike the SHA family — see partial_hash_compute()).
 */
1187 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1191 if (!MD5_Init(&ctx))
1193 MD5_Transform(&ctx, data_in);
1194 rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
/* Computes a partial hash for the given algorithm and writes the state to
 * data_out in the byte order QAT expects: SHA-1/224/256 state words are
 * byte-swapped as 32-bit big-endian, SHA-384/512 as 64-bit big-endian, and
 * MD5 state is emitted as-is (written directly by partial_hash_md5()).
 */
1199 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
/* scratch digest sized for the largest supported algorithm */
1204 uint8_t digest[qat_hash_get_digest_size(
1205 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1206 uint32_t *hash_state_out_be32;
1207 uint64_t *hash_state_out_be64;
1210 /* Initialize to avoid gcc warning */
1211 memset(digest, 0, sizeof(digest));
1213 digest_size = qat_hash_get_digest_size(hash_alg);
1214 if (digest_size <= 0)
/* two aliases of data_out: 32-bit swaps for SHA-1/224/256, 64-bit for
 * SHA-384/512 */
1217 hash_state_out_be32 = (uint32_t *)data_out;
1218 hash_state_out_be64 = (uint64_t *)data_out;
1221 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1222 if (partial_hash_sha1(data_in, digest))
1224 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1225 *hash_state_out_be32 =
1226 rte_bswap32(*(((uint32_t *)digest)+i));
1228 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1229 if (partial_hash_sha224(data_in, digest))
1231 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1232 *hash_state_out_be32 =
1233 rte_bswap32(*(((uint32_t *)digest)+i))
1235 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1236 if (partial_hash_sha256(data_in, digest))
1238 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1239 *hash_state_out_be32 =
1240 rte_bswap32(*(((uint32_t *)digest)+i));
1242 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1243 if (partial_hash_sha384(data_in, digest))
1245 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1246 *hash_state_out_be64 =
1247 rte_bswap64(*(((uint64_t *)digest)+i));
1249 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1250 if (partial_hash_sha512(data_in, digest))
1252 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1253 *hash_state_out_be64 =
1254 rte_bswap64(*(((uint64_t *)digest)+i));
1256 case ICP_QAT_HW_AUTH_ALGO_MD5:
1257 if (partial_hash_md5(data_in, data_out))
1261 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1267 #define HMAC_IPAD_VALUE 0x36
1268 #define HMAC_OPAD_VALUE 0x5c
1269 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1271 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1273 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1277 derived[0] = base[0] << 1;
1278 for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1279 derived[i] = base[i] << 1;
1280 derived[i - 1] |= base[i] >> 7;
1284 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1287 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1288 const uint8_t *auth_key,
1289 uint16_t auth_keylen,
1290 uint8_t *p_state_buf,
1291 uint16_t *p_state_len,
1295 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1296 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1299 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1305 uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1308 auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1310 in = rte_zmalloc("AES CMAC K1",
1311 ICP_QAT_HW_AES_128_KEY_SZ, 16);
1314 QAT_LOG(ERR, "Failed to alloc memory");
1318 rte_memcpy(in, AES_CMAC_SEED,
1319 ICP_QAT_HW_AES_128_KEY_SZ);
1320 rte_memcpy(p_state_buf, auth_key, auth_keylen);
1322 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1328 AES_encrypt(in, k0, &enc_key);
1330 k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1331 k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1333 aes_cmac_key_derive(k0, k1);
1334 aes_cmac_key_derive(k1, k2);
1336 memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1337 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1341 static uint8_t qat_aes_xcbc_key_seed[
1342 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1343 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1344 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1345 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1346 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1347 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1348 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1352 uint8_t *out = p_state_buf;
1356 in = rte_zmalloc("working mem for key",
1357 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1359 QAT_LOG(ERR, "Failed to alloc memory");
1363 rte_memcpy(in, qat_aes_xcbc_key_seed,
1364 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1365 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1366 if (AES_set_encrypt_key(auth_key,
1370 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1372 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1373 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1376 AES_encrypt(in, out, &enc_key);
1377 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1378 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1380 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1381 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1385 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1386 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1388 uint8_t *out = p_state_buf;
1391 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1392 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1393 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1394 in = rte_zmalloc("working mem for key",
1395 ICP_QAT_HW_GALOIS_H_SZ, 16);
1397 QAT_LOG(ERR, "Failed to alloc memory");
1401 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1402 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1406 AES_encrypt(in, out, &enc_key);
1407 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1408 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1409 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1414 block_size = qat_hash_get_block_size(hash_alg);
1417 /* init ipad and opad from key and xor with fixed values */
1418 memset(ipad, 0, block_size);
1419 memset(opad, 0, block_size);
1421 if (auth_keylen > (unsigned int)block_size) {
1422 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1425 rte_memcpy(ipad, auth_key, auth_keylen);
1426 rte_memcpy(opad, auth_key, auth_keylen);
1428 for (i = 0; i < block_size; i++) {
1429 uint8_t *ipad_ptr = ipad + i;
1430 uint8_t *opad_ptr = opad + i;
1431 *ipad_ptr ^= HMAC_IPAD_VALUE;
1432 *opad_ptr ^= HMAC_OPAD_VALUE;
1435 /* do partial hash of ipad and copy to state1 */
1436 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1437 memset(ipad, 0, block_size);
1438 memset(opad, 0, block_size);
1439 QAT_LOG(ERR, "ipad precompute failed");
1444 * State len is a multiple of 8, so may be larger than the digest.
1445 * Put the partial hash of opad state_len bytes after state1
1447 *p_state_len = qat_hash_get_state1_size(hash_alg);
1448 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1449 memset(ipad, 0, block_size);
1450 memset(opad, 0, block_size);
1451 QAT_LOG(ERR, "opad precompute failed");
1455 /* don't leave data lying around */
1456 memset(ipad, 0, block_size);
1457 memset(opad, 0, block_size);
1462 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
1464 struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1465 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1466 enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
1467 uint32_t slice_flags = session->slice_types;
1470 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1471 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1472 header->service_cmd_id = session->qat_cmd;
1473 header->comn_req_flags =
1474 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1475 QAT_COMN_PTR_TYPE_FLAT);
1476 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1477 ICP_QAT_FW_LA_PARTIAL_NONE);
1478 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1479 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1481 switch (proto_flags) {
1482 case QAT_CRYPTO_PROTO_FLAG_NONE:
1483 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1484 ICP_QAT_FW_LA_NO_PROTO);
1486 case QAT_CRYPTO_PROTO_FLAG_CCM:
1487 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1488 ICP_QAT_FW_LA_CCM_PROTO);
1490 case QAT_CRYPTO_PROTO_FLAG_GCM:
1491 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1492 ICP_QAT_FW_LA_GCM_PROTO);
1494 case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1495 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1496 ICP_QAT_FW_LA_SNOW_3G_PROTO);
1498 case QAT_CRYPTO_PROTO_FLAG_ZUC:
1499 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1500 ICP_QAT_FW_LA_ZUC_3G_PROTO);
1504 /* More than one of the following flags can be set at once */
1505 if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
1506 ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
1507 header->serv_specif_flags,
1508 ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
1510 if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
1511 ICP_QAT_FW_LA_SLICE_TYPE_SET(
1512 header->serv_specif_flags,
1513 ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
1516 if (session->is_auth) {
1517 if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1518 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1519 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1520 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1521 ICP_QAT_FW_LA_CMP_AUTH_RES);
1522 } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
1523 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1524 ICP_QAT_FW_LA_RET_AUTH_RES);
1525 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1526 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1529 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1530 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1531 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1532 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1535 if (session->is_iv12B) {
1536 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1537 header->serv_specif_flags,
1538 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1541 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1542 ICP_QAT_FW_LA_NO_UPDATE_STATE);
1543 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1544 ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1547 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
1548 const uint8_t *cipherkey,
1549 uint32_t cipherkeylen)
1551 struct icp_qat_hw_cipher_algo_blk *cipher;
1552 struct icp_qat_hw_cipher_algo_blk20 *cipher20;
1553 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1554 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1555 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1556 void *ptr = &req_tmpl->cd_ctrl;
1557 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1558 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1559 enum icp_qat_hw_cipher_convert key_convert;
1560 struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
1561 (struct icp_qat_fw_la_cipher_20_req_params *)
1562 &cdesc->fw_req.serv_specif_rqpars;
1563 struct icp_qat_fw_la_cipher_req_params *req_cipher =
1564 (struct icp_qat_fw_la_cipher_req_params *)
1565 &cdesc->fw_req.serv_specif_rqpars;
1566 uint32_t total_key_size;
1567 uint16_t cipher_offset, cd_size;
1568 uint32_t wordIndex = 0;
1569 uint32_t *temp_key = NULL;
1571 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1572 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
1573 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1574 ICP_QAT_FW_SLICE_CIPHER);
1575 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1576 ICP_QAT_FW_SLICE_DRAM_WR);
1577 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1578 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1579 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1580 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1581 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1582 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1583 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
1584 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1585 ICP_QAT_FW_SLICE_CIPHER);
1586 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1587 ICP_QAT_FW_SLICE_AUTH);
1588 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1589 ICP_QAT_FW_SLICE_AUTH);
1590 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1591 ICP_QAT_FW_SLICE_DRAM_WR);
1592 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1593 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1594 QAT_LOG(ERR, "Invalid param, must be a cipher command.");
1598 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
1600 * CTR Streaming ciphers are a special case. Decrypt = encrypt
1601 * Overriding default values previously set.
1602 * Chacha20-Poly1305 is special case, CTR but single-pass
1603 * so both direction need to be used.
1605 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
1606 if (cdesc->qat_cipher_alg ==
1607 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
1608 cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1609 cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
1611 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1612 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
1613 || cdesc->qat_cipher_alg ==
1614 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
1615 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
1616 else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
1617 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1618 else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
1619 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1621 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
1623 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
1624 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
1625 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1626 cipher_cd_ctrl->cipher_state_sz =
1627 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1628 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1630 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
1631 total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
1632 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
1633 cipher_cd_ctrl->cipher_padding_sz =
1634 (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
1635 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
1636 total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
1637 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
1638 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
1639 total_key_size = ICP_QAT_HW_DES_KEY_SZ;
1640 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
1641 } else if (cdesc->qat_cipher_alg ==
1642 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1643 total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
1644 ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1645 cipher_cd_ctrl->cipher_state_sz =
1646 ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1647 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1648 cdesc->min_qat_dev_gen = QAT_GEN2;
1650 total_key_size = cipherkeylen;
1651 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
1653 cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1654 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
1656 cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
1657 cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
1658 cipher->cipher_config.val =
1659 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
1660 cdesc->qat_cipher_alg, key_convert,
1663 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
1664 temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
1665 sizeof(struct icp_qat_hw_cipher_config)
1667 memcpy(cipher->key, cipherkey, cipherkeylen);
1668 memcpy(temp_key, cipherkey, cipherkeylen);
1670 /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
1671 for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
1673 temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
1675 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
1676 cipherkeylen + cipherkeylen;
1677 } else if (cdesc->is_ucs) {
1678 const uint8_t *final_key = cipherkey;
1680 cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
1681 total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
1682 ICP_QAT_HW_AES_128_KEY_SZ);
1683 cipher20->cipher_config.reserved[0] = 0;
1684 cipher20->cipher_config.reserved[1] = 0;
1685 cipher20->cipher_config.reserved[2] = 0;
1687 rte_memcpy(cipher20->key, final_key, cipherkeylen);
1688 cdesc->cd_cur_ptr +=
1689 sizeof(struct icp_qat_hw_ucs_cipher_config) +
1692 memcpy(cipher->key, cipherkey, cipherkeylen);
1693 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
1697 if (cdesc->is_single_pass) {
1698 QAT_FIELD_SET(cipher->cipher_config.val,
1699 cdesc->digest_length,
1700 QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
1701 QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
1702 /* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
1703 cdesc->cd.cipher.cipher_config.reserved =
1704 ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
1706 cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
1709 if (total_key_size > cipherkeylen) {
1710 uint32_t padding_size = total_key_size-cipherkeylen;
1711 if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
1712 && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
1713 /* K3 not provided so use K1 = K3*/
1714 memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
1715 } else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
1716 && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
1717 /* K2 and K3 not provided so use K1 = K2 = K3*/
1718 memcpy(cdesc->cd_cur_ptr, cipherkey,
1720 memcpy(cdesc->cd_cur_ptr+cipherkeylen,
1721 cipherkey, cipherkeylen);
1723 memset(cdesc->cd_cur_ptr, 0, padding_size);
1725 cdesc->cd_cur_ptr += padding_size;
1727 if (cdesc->is_ucs) {
1729 * These values match in terms of position auth
1730 * slice request fields
1732 req_ucs->spc_auth_res_sz = cdesc->digest_length;
1733 if (!cdesc->is_gmac) {
1734 req_ucs->spc_aad_sz = cdesc->aad_len;
1735 req_ucs->spc_aad_offset = 0;
1737 } else if (cdesc->is_single_pass) {
1738 req_cipher->spc_aad_sz = cdesc->aad_len;
1739 req_cipher->spc_auth_res_sz = cdesc->digest_length;
1741 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
1742 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
1743 cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
1748 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1749 const uint8_t *authkey,
1750 uint32_t authkeylen,
1751 uint32_t aad_length,
1752 uint32_t digestsize,
1753 unsigned int operation)
1755 struct icp_qat_hw_auth_setup *hash;
1756 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1757 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1758 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1759 void *ptr = &req_tmpl->cd_ctrl;
1760 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1761 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1762 struct icp_qat_fw_la_auth_req_params *auth_param =
1763 (struct icp_qat_fw_la_auth_req_params *)
1764 ((char *)&req_tmpl->serv_specif_rqpars +
1765 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1766 uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1767 uint16_t hash_offset, cd_size;
1768 uint32_t *aad_len = NULL;
1769 uint32_t wordIndex = 0;
1772 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1773 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1774 ICP_QAT_FW_SLICE_AUTH);
1775 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1776 ICP_QAT_FW_SLICE_DRAM_WR);
1777 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1778 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1779 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1780 ICP_QAT_FW_SLICE_AUTH);
1781 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1782 ICP_QAT_FW_SLICE_CIPHER);
1783 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1784 ICP_QAT_FW_SLICE_CIPHER);
1785 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1786 ICP_QAT_FW_SLICE_DRAM_WR);
1787 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1788 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1789 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1793 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1794 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1796 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1799 * Setup the inner hash config
1801 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1802 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1803 hash->auth_config.reserved = 0;
1804 hash->auth_config.config =
1805 ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1806 cdesc->qat_hash_alg, digestsize);
1808 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1809 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1810 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1811 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1812 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1813 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1814 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1816 hash->auth_counter.counter = 0;
1818 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1822 hash->auth_counter.counter = rte_bswap32(block_size);
1825 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1828 * cd_cur_ptr now points at the state1 information.
1830 switch (cdesc->qat_hash_alg) {
1831 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1832 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1834 rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1835 sizeof(sha1InitialState));
1836 state1_size = qat_hash_get_state1_size(
1837 cdesc->qat_hash_alg);
1841 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1842 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1844 QAT_LOG(ERR, "(SHA)precompute failed");
1847 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1849 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1850 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1852 rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1853 sizeof(sha224InitialState));
1854 state1_size = qat_hash_get_state1_size(
1855 cdesc->qat_hash_alg);
1859 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1860 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1862 QAT_LOG(ERR, "(SHA)precompute failed");
1865 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1867 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1868 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1870 rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1871 sizeof(sha256InitialState));
1872 state1_size = qat_hash_get_state1_size(
1873 cdesc->qat_hash_alg);
1877 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1878 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1880 QAT_LOG(ERR, "(SHA)precompute failed");
1883 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1885 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1886 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1888 rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1889 sizeof(sha384InitialState));
1890 state1_size = qat_hash_get_state1_size(
1891 cdesc->qat_hash_alg);
1895 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1896 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1898 QAT_LOG(ERR, "(SHA)precompute failed");
1901 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1903 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1904 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1906 rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1907 sizeof(sha512InitialState));
1908 state1_size = qat_hash_get_state1_size(
1909 cdesc->qat_hash_alg);
1913 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1914 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1916 QAT_LOG(ERR, "(SHA)precompute failed");
1919 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1921 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1922 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1924 if (cdesc->aes_cmac)
1925 memset(cdesc->cd_cur_ptr, 0, state1_size);
1926 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1927 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1928 &state2_size, cdesc->aes_cmac)) {
1929 cdesc->aes_cmac ? QAT_LOG(ERR,
1930 "(CMAC)precompute failed")
1932 "(XCBC)precompute failed");
1936 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1937 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1938 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1939 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1940 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1941 authkeylen, cdesc->cd_cur_ptr + state1_size,
1942 &state2_size, cdesc->aes_cmac)) {
1943 QAT_LOG(ERR, "(GCM)precompute failed");
1947 * Write (the length of AAD) into bytes 16-19 of state2
1948 * in big-endian format. This field is 8 bytes
1950 auth_param->u2.aad_sz =
1951 RTE_ALIGN_CEIL(aad_length, 16);
1952 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1954 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1955 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1956 ICP_QAT_HW_GALOIS_H_SZ);
1957 *aad_len = rte_bswap32(aad_length);
1958 cdesc->aad_len = aad_length;
1960 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1961 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1962 state1_size = qat_hash_get_state1_size(
1963 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1964 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1965 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1967 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1968 (cdesc->cd_cur_ptr + state1_size + state2_size);
1969 cipherconfig->cipher_config.val =
1970 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1971 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1972 ICP_QAT_HW_CIPHER_KEY_CONVERT,
1973 ICP_QAT_HW_CIPHER_ENCRYPT);
1974 memcpy(cipherconfig->key, authkey, authkeylen);
1975 memset(cipherconfig->key + authkeylen,
1976 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1977 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1978 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1979 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1981 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1982 hash->auth_config.config =
1983 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1984 cdesc->qat_hash_alg, digestsize);
1985 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1986 state1_size = qat_hash_get_state1_size(
1987 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1988 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1989 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1990 + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1992 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1993 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1994 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1995 cdesc->min_qat_dev_gen = QAT_GEN2;
1998 case ICP_QAT_HW_AUTH_ALGO_MD5:
1999 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
2000 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2002 QAT_LOG(ERR, "(MD5)precompute failed");
2005 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
2007 case ICP_QAT_HW_AUTH_ALGO_NULL:
2008 state1_size = qat_hash_get_state1_size(
2009 ICP_QAT_HW_AUTH_ALGO_NULL);
2010 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
2012 case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
2013 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
2014 state1_size = qat_hash_get_state1_size(
2015 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
2016 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
2017 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
2019 if (aad_length > 0) {
2020 aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2021 ICP_QAT_HW_CCM_AAD_LEN_INFO;
2022 auth_param->u2.aad_sz =
2023 RTE_ALIGN_CEIL(aad_length,
2024 ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2026 auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2028 cdesc->aad_len = aad_length;
2029 hash->auth_counter.counter = 0;
2031 hash_cd_ctrl->outer_prefix_sz = digestsize;
2032 auth_param->hash_state_sz = digestsize;
2034 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2036 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2037 state1_size = qat_hash_get_state1_size(
2038 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2039 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2040 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2041 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2044 * The Inner Hash Initial State2 block must contain IK
2045 * (Initialisation Key), followed by IK XOR-ed with KM
2046 * (Key Modifier): IK||(IK^KM).
2048 /* write the auth key */
2049 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2050 /* initialise temp key with auth key */
2051 memcpy(pTempKey, authkey, authkeylen);
2052 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2053 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2054 pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2057 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2061 /* Auth CD config setup */
2062 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2063 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2064 hash_cd_ctrl->inner_res_sz = digestsize;
2065 hash_cd_ctrl->final_sz = digestsize;
2066 hash_cd_ctrl->inner_state1_sz = state1_size;
2067 auth_param->auth_res_sz = digestsize;
2069 hash_cd_ctrl->inner_state2_sz = state2_size;
2070 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2071 ((sizeof(struct icp_qat_hw_auth_setup) +
2072 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2075 cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2076 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2078 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2079 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2084 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2087 case ICP_QAT_HW_AES_128_KEY_SZ:
2088 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2090 case ICP_QAT_HW_AES_192_KEY_SZ:
2091 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2093 case ICP_QAT_HW_AES_256_KEY_SZ:
2094 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2102 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2103 enum icp_qat_hw_cipher_algo *alg)
2106 case ICP_QAT_HW_AES_128_KEY_SZ:
2107 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2109 case ICP_QAT_HW_AES_256_KEY_SZ:
2110 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2118 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2121 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2122 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2130 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2133 case ICP_QAT_HW_KASUMI_KEY_SZ:
2134 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2142 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2145 case ICP_QAT_HW_DES_KEY_SZ:
2146 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2154 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2157 case QAT_3DES_KEY_SZ_OPT1:
2158 case QAT_3DES_KEY_SZ_OPT2:
2159 case QAT_3DES_KEY_SZ_OPT3:
2160 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2168 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2171 case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2172 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2180 #ifdef RTE_LIB_SECURITY
2182 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2184 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2185 struct rte_security_docsis_xform *docsis = &conf->docsis;
2187 /* CRC generate -> Cipher encrypt */
2188 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2190 if (crypto_sym != NULL &&
2191 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2192 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2193 crypto_sym->cipher.algo ==
2194 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2195 (crypto_sym->cipher.key.length ==
2196 ICP_QAT_HW_AES_128_KEY_SZ ||
2197 crypto_sym->cipher.key.length ==
2198 ICP_QAT_HW_AES_256_KEY_SZ) &&
2199 crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2200 crypto_sym->next == NULL) {
2203 /* Cipher decrypt -> CRC verify */
2204 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2206 if (crypto_sym != NULL &&
2207 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2208 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2209 crypto_sym->cipher.algo ==
2210 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2211 (crypto_sym->cipher.key.length ==
2212 ICP_QAT_HW_AES_128_KEY_SZ ||
2213 crypto_sym->cipher.key.length ==
2214 ICP_QAT_HW_AES_256_KEY_SZ) &&
2215 crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2216 crypto_sym->next == NULL) {
2225 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2226 struct rte_security_session_conf *conf, void *session_private)
2230 struct rte_crypto_sym_xform *xform = NULL;
2231 struct qat_sym_session *session = session_private;
2233 /* Clear the session */
2234 memset(session, 0, qat_sym_session_get_private_size(dev));
2236 ret = qat_sec_session_check_docsis(conf);
2238 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2242 xform = conf->crypto_xform;
2244 /* Verify the session physical address is known */
2245 rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2246 if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2248 "Session physical address unknown. Bad memory pool.");
2252 /* Set context descriptor physical address */
2253 session->cd_paddr = session_paddr +
2254 offsetof(struct qat_sym_session, cd);
2256 session->min_qat_dev_gen = QAT_GEN1;
2258 /* Get requested QAT command id - should be cipher */
2259 qat_cmd_id = qat_get_cmd_id(xform);
2260 if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2261 QAT_LOG(ERR, "Unsupported xform chain requested");
2264 session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2266 ret = qat_sym_session_configure_cipher(dev, xform, session);
2269 qat_sym_session_finalize(session);
2275 qat_security_session_create(void *dev,
2276 struct rte_security_session_conf *conf,
2277 struct rte_security_session *sess,
2278 struct rte_mempool *mempool)
2280 void *sess_private_data;
2281 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2284 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2285 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2286 QAT_LOG(ERR, "Invalid security protocol");
2290 if (rte_mempool_get(mempool, &sess_private_data)) {
2291 QAT_LOG(ERR, "Couldn't get object from session mempool");
2295 ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2298 QAT_LOG(ERR, "Failed to configure session parameters");
2299 /* Return session to mempool */
2300 rte_mempool_put(mempool, sess_private_data);
2304 set_sec_session_private_data(sess, sess_private_data);
2310 qat_security_session_destroy(void *dev __rte_unused,
2311 struct rte_security_session *sess)
2313 void *sess_priv = get_sec_session_private_data(sess);
2314 struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2318 bpi_cipher_ctx_free(s->bpi_ctx);
2319 memset(s, 0, qat_sym_session_get_private_size(dev));
2320 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2322 set_sec_session_private_data(sess, NULL);
2323 rte_mempool_put(sess_mp, sess_priv);