1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2 * Copyright(c) 2015-2019 Intel Corporation
5 #include <openssl/sha.h> /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h> /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h> /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h> /* Needed for bpi runt block processing */
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
/* NOTE(review): the arrays below hold the FIPS 180-2 initial hash words
 * serialized in big-endian byte order (e.g. 0x67 0x45 0x23 0x01 is SHA-1 H0).
 * Presumably loaded as the starting digest state for HMAC precompute
 * operations — confirm against qat_sym_do_precomputes().
 */
25 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
26 static const uint8_t sha1InitialState[] = {
27 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
28 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};
30 /* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
31 static const uint8_t sha224InitialState[] = {
32 0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
33 0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
34 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};
36 /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
37 static const uint8_t sha256InitialState[] = {
38 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
39 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
40 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};
42 /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
43 static const uint8_t sha384InitialState[] = {
44 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
45 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
46 0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
47 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
48 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
49 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};
51 /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
52 static const uint8_t sha512InitialState[] = {
53 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
54 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
55 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
56 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
57 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
58 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
/* NOTE(review): forward declarations of the content-descriptor setup
 * helpers and the header-init routine; the extraction dropped the return
 * type lines and some parameter lines — restore from the definitions
 * further down in the original file.
 */
61 qat_sym_cd_cipher_set(struct qat_sym_session *cd,
62 const uint8_t *enckey,
66 qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
67 const uint8_t *authkey,
71 unsigned int operation);
73 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
75 /* Req/cd init functions */
/* Finalize: writes the common request header into the session's fw_req */
78 qat_sym_session_finalize(struct qat_sym_session *session)
80 qat_sym_session_init_common_hdr(session);
83 /** Frees a context previously created
84 * Depends on openssl libcrypto
87 bpi_cipher_ctx_free(void *bpi_ctx)
90 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
93 /** Creates a context in either AES or DES in ECB mode
94 * Depends on openssl libcrypto
97 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
98 enum rte_crypto_cipher_operation direction __rte_unused,
99 const uint8_t *key, uint16_t key_length, void **ctx)
101 const EVP_CIPHER *algo = NULL;
103 *ctx = EVP_CIPHER_CTX_new();
110 if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
111 algo = EVP_des_ecb();
113 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
114 algo = EVP_aes_128_ecb();
116 algo = EVP_aes_256_ecb();
118 /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
119 if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
128 EVP_CIPHER_CTX_free(*ctx);
133 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
134 struct qat_sym_dev_private *internals)
137 const struct rte_cryptodev_capabilities *capability;
139 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
140 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
141 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
144 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
147 if (capability->sym.cipher.algo == algo)
154 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
155 struct qat_sym_dev_private *internals)
158 const struct rte_cryptodev_capabilities *capability;
160 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
161 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
162 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
165 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
168 if (capability->sym.auth.algo == algo)
175 qat_sym_session_clear(struct rte_cryptodev *dev,
176 struct rte_cryptodev_sym_session *sess)
178 uint8_t index = dev->driver_id;
179 void *sess_priv = get_sym_session_private_data(sess, index);
180 struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
184 bpi_cipher_ctx_free(s->bpi_ctx);
185 memset(s, 0, qat_sym_session_get_private_size(dev));
186 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
188 set_sym_session_private_data(sess, index, NULL);
189 rte_mempool_put(sess_mp, sess_priv);
194 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
197 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
198 return ICP_QAT_FW_LA_CMD_CIPHER;
200 /* Authentication Only */
201 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
202 return ICP_QAT_FW_LA_CMD_AUTH;
205 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
206 /* AES-GCM and AES-CCM works with different direction
207 * GCM first encrypts and generate hash where AES-CCM
208 * first generate hash and encrypts. Similar relation
209 * applies to decryption.
211 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
212 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
213 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
215 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
217 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
218 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
220 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
223 if (xform->next == NULL)
226 /* Cipher then Authenticate */
227 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
228 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
229 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
231 /* Authenticate then Cipher */
232 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
233 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
239 static struct rte_crypto_auth_xform *
240 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
243 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
252 static struct rte_crypto_cipher_xform *
253 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
256 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
257 return &xform->cipher;
/* Configure the cipher half of a QAT session from the cipher xform:
 * validates the key size, picks qat_cipher_alg/qat_mode/qat_dir, creates
 * an OpenSSL BPI ctx for DOCSIS algorithms, and builds the cipher part of
 * the content descriptor via qat_sym_cd_cipher_set().
 * NOTE(review): extraction dropped many lines here (error returns,
 * break statements, goto-error cleanup labels) — comments below describe
 * only the visible lines.
 */
266 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
267 struct rte_crypto_sym_xform *xform,
268 struct qat_sym_session *session)
270 struct qat_sym_dev_private *internals = dev->data->dev_private;
271 struct rte_crypto_cipher_xform *cipher_xform = NULL;
272 enum qat_device_gen qat_dev_gen =
273 internals->qat_dev->qat_dev_gen;
276 /* Get cipher xform from crypto xform chain */
277 cipher_xform = qat_get_cipher_xform(xform);
279 session->cipher_iv.offset = cipher_xform->iv.offset;
280 session->cipher_iv.length = cipher_xform->iv.length;
/* Per-algorithm key validation and HW mode selection */
282 switch (cipher_xform->algo) {
283 case RTE_CRYPTO_CIPHER_AES_CBC:
284 if (qat_sym_validate_aes_key(cipher_xform->key.length,
285 &session->qat_cipher_alg) != 0) {
286 QAT_LOG(ERR, "Invalid AES cipher key size");
290 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
292 case RTE_CRYPTO_CIPHER_AES_CTR:
293 if (qat_sym_validate_aes_key(cipher_xform->key.length,
294 &session->qat_cipher_alg) != 0) {
295 QAT_LOG(ERR, "Invalid AES cipher key size");
299 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
300 if (qat_dev_gen == QAT_GEN4)
303 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
304 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
305 &session->qat_cipher_alg) != 0) {
306 QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
310 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
312 case RTE_CRYPTO_CIPHER_NULL:
313 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
314 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
316 case RTE_CRYPTO_CIPHER_KASUMI_F8:
317 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
318 &session->qat_cipher_alg) != 0) {
319 QAT_LOG(ERR, "Invalid KASUMI cipher key size");
323 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
325 case RTE_CRYPTO_CIPHER_3DES_CBC:
326 if (qat_sym_validate_3des_key(cipher_xform->key.length,
327 &session->qat_cipher_alg) != 0) {
328 QAT_LOG(ERR, "Invalid 3DES cipher key size");
332 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
334 case RTE_CRYPTO_CIPHER_DES_CBC:
335 if (qat_sym_validate_des_key(cipher_xform->key.length,
336 &session->qat_cipher_alg) != 0) {
337 QAT_LOG(ERR, "Invalid DES cipher key size");
341 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
343 case RTE_CRYPTO_CIPHER_3DES_CTR:
344 if (qat_sym_validate_3des_key(cipher_xform->key.length,
345 &session->qat_cipher_alg) != 0) {
346 QAT_LOG(ERR, "Invalid 3DES cipher key size");
350 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
/* DOCSIS BPI modes additionally need an OpenSSL ECB ctx for runt blocks */
352 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
353 ret = bpi_cipher_ctx_init(
356 cipher_xform->key.data,
357 cipher_xform->key.length,
360 QAT_LOG(ERR, "failed to create DES BPI ctx");
363 if (qat_sym_validate_des_key(cipher_xform->key.length,
364 &session->qat_cipher_alg) != 0) {
365 QAT_LOG(ERR, "Invalid DES cipher key size");
369 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
371 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
372 ret = bpi_cipher_ctx_init(
375 cipher_xform->key.data,
376 cipher_xform->key.length,
379 QAT_LOG(ERR, "failed to create AES BPI ctx");
382 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
383 &session->qat_cipher_alg) != 0) {
384 QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
388 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
390 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
391 if (!qat_is_cipher_alg_supported(
392 cipher_xform->algo, internals)) {
393 QAT_LOG(ERR, "%s not supported on this device",
394 rte_crypto_cipher_algorithm_strings
395 [cipher_xform->algo]);
399 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
400 &session->qat_cipher_alg) != 0) {
401 QAT_LOG(ERR, "Invalid ZUC cipher key size");
405 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
407 case RTE_CRYPTO_CIPHER_AES_XTS:
/* XTS key is two concatenated AES keys, hence the /2 checks */
408 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
409 QAT_LOG(ERR, "AES-XTS-192 not supported");
413 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
414 &session->qat_cipher_alg) != 0) {
415 QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
419 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
421 case RTE_CRYPTO_CIPHER_3DES_ECB:
422 case RTE_CRYPTO_CIPHER_AES_ECB:
423 case RTE_CRYPTO_CIPHER_AES_F8:
424 case RTE_CRYPTO_CIPHER_ARC4:
425 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
430 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
436 if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
437 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
439 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
441 if (qat_sym_cd_cipher_set(session,
442 cipher_xform->key.data,
443 cipher_xform->key.length)) {
/* Error path: release the BPI ctx allocated above, if any */
451 if (session->bpi_ctx) {
452 bpi_cipher_ctx_free(session->bpi_ctx);
453 session->bpi_ctx = NULL;
459 qat_sym_session_configure(struct rte_cryptodev *dev,
460 struct rte_crypto_sym_xform *xform,
461 struct rte_cryptodev_sym_session *sess,
462 struct rte_mempool *mempool)
464 void *sess_private_data;
467 if (rte_mempool_get(mempool, &sess_private_data)) {
469 "Couldn't get object from session mempool");
473 ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
476 "Crypto QAT PMD: failed to configure session parameters");
478 /* Return session to mempool */
479 rte_mempool_put(mempool, sess_private_data);
483 set_sym_session_private_data(sess, dev->driver_id,
490 qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
493 struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
494 struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
495 (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
496 session->fw_req.cd_ctrl.content_desc_ctrl_lw;
498 /* Set the Use Extended Protocol Flags bit in LW 1 */
499 QAT_FIELD_SET(header->comn_req_flags,
500 QAT_COMN_EXT_FLAGS_USED,
501 QAT_COMN_EXT_FLAGS_BITPOS,
502 QAT_COMN_EXT_FLAGS_MASK);
504 /* Set Hash Flags in LW 28 */
505 cd_ctrl->hash_flags |= hash_flag;
507 /* Set proto flags in LW 1 */
508 switch (session->qat_cipher_alg) {
509 case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
510 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
511 ICP_QAT_FW_LA_SNOW_3G_PROTO);
512 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
513 header->serv_specif_flags, 0);
515 case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
516 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
517 ICP_QAT_FW_LA_NO_PROTO);
518 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
519 header->serv_specif_flags,
520 ICP_QAT_FW_LA_ZUC_3G_PROTO);
523 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
524 ICP_QAT_FW_LA_NO_PROTO);
525 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
526 header->serv_specif_flags, 0);
532 qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
533 struct qat_sym_session *session)
535 const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
536 enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
537 QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
539 if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
540 session->qat_cipher_alg !=
541 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
542 session->min_qat_dev_gen = min_dev_gen;
543 qat_sym_session_set_ext_hash_flags(session,
544 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
545 } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
546 session->qat_cipher_alg !=
547 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
548 session->min_qat_dev_gen = min_dev_gen;
549 qat_sym_session_set_ext_hash_flags(session,
550 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
551 } else if ((session->aes_cmac ||
552 session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
553 (session->qat_cipher_alg ==
554 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
555 session->qat_cipher_alg ==
556 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
557 session->min_qat_dev_gen = min_dev_gen;
558 qat_sym_session_set_ext_hash_flags(session, 0);
/* Fill a session private area from the xform chain: verify the IOVA of
 * the mempool object, zero the session, compute the content descriptor
 * physical address, dispatch to the cipher/auth/aead configure helpers
 * per the firmware command id, then finalize the common header and apply
 * mixed hash+cipher handling.
 * NOTE(review): extraction dropped lines here (error returns, breaks,
 * the 'ret'/'qat_cmd_id' declarations and the handle_mixed gating) —
 * comments describe only the visible lines.
 */
563 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
564 struct rte_crypto_sym_xform *xform, void *session_private)
566 struct qat_sym_session *session = session_private;
567 struct qat_sym_dev_private *internals = dev->data->dev_private;
568 enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
571 int handle_mixed = 0;
573 /* Verify the session physical address is known */
574 rte_iova_t session_paddr = rte_mempool_virt2iova(session);
575 if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
577 "Session physical address unknown. Bad memory pool.");
581 memset(session, 0, sizeof(*session));
582 /* Set context descriptor physical address */
583 session->cd_paddr = session_paddr +
584 offsetof(struct qat_sym_session, cd);
586 session->min_qat_dev_gen = QAT_GEN1;
587 session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
590 /* Get requested QAT command id */
591 qat_cmd_id = qat_get_cmd_id(xform);
592 if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
593 QAT_LOG(ERR, "Unsupported xform chain requested");
596 session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
597 switch (session->qat_cmd) {
598 case ICP_QAT_FW_LA_CMD_CIPHER:
599 ret = qat_sym_session_configure_cipher(dev, xform, session);
603 case ICP_QAT_FW_LA_CMD_AUTH:
604 ret = qat_sym_session_configure_auth(dev, xform, session);
/* GEN3 treats GMAC with a 12-byte IV as single-pass */
607 session->is_single_pass_gmac =
608 qat_dev_gen == QAT_GEN3 &&
609 xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
610 xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
612 case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
613 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
614 ret = qat_sym_session_configure_aead(dev, xform,
/* chained (non-AEAD) case: cipher first, then auth */
619 ret = qat_sym_session_configure_cipher(dev,
623 ret = qat_sym_session_configure_auth(dev,
630 case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
631 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
632 ret = qat_sym_session_configure_aead(dev, xform,
/* chained (non-AEAD) case: auth first, then cipher */
637 ret = qat_sym_session_configure_auth(dev,
641 ret = qat_sym_session_configure_cipher(dev,
648 case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
649 case ICP_QAT_FW_LA_CMD_TRNG_TEST:
650 case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
651 case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
652 case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
653 case ICP_QAT_FW_LA_CMD_MGF1:
654 case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
655 case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
656 case ICP_QAT_FW_LA_CMD_DELIMITER:
657 QAT_LOG(ERR, "Unsupported Service %u",
661 QAT_LOG(ERR, "Unsupported Service %u",
665 qat_sym_session_finalize(session);
667 /* Special handling of mixed hash+cipher algorithms */
668 qat_sym_session_handle_mixed(dev, session);
675 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
676 const struct rte_crypto_aead_xform *aead_xform)
678 session->is_single_pass = 1;
679 session->is_auth = 1;
680 session->min_qat_dev_gen = QAT_GEN3;
681 session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
682 /* Chacha-Poly is special case that use QAT CTR mode */
683 if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
684 session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
686 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
688 session->cipher_iv.offset = aead_xform->iv.offset;
689 session->cipher_iv.length = aead_xform->iv.length;
690 session->aad_len = aead_xform->aad_length;
691 session->digest_length = aead_xform->digest_length;
693 if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
694 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
695 session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
697 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
698 session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
/* Configure the auth half of a QAT session from the auth xform: selects
 * qat_hash_alg/auth_mode per algorithm and, for AES-GMAC, also builds a
 * cipher+auth content descriptor (GMAC runs as GCM with empty plaintext).
 * NOTE(review): extraction dropped lines here (break statements, error
 * returns, some call arguments) — comments describe only visible lines.
 */
705 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
706 struct rte_crypto_sym_xform *xform,
707 struct qat_sym_session *session)
709 struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
710 struct qat_sym_dev_private *internals = dev->data->dev_private;
711 const uint8_t *key_data = auth_xform->key.data;
712 uint8_t key_length = auth_xform->key.length;
713 enum qat_device_gen qat_dev_gen =
714 internals->qat_dev->qat_dev_gen;
716 session->aes_cmac = 0;
717 session->auth_key_length = auth_xform->key.length;
718 session->auth_iv.offset = auth_xform->iv.offset;
719 session->auth_iv.length = auth_xform->iv.length;
720 session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
721 session->is_auth = 1;
722 session->digest_length = auth_xform->digest_length;
/* Plain (non-HMAC) hashes use MODE0; HMAC variants keep MODE1 */
724 switch (auth_xform->algo) {
725 case RTE_CRYPTO_AUTH_SHA1:
726 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
727 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
729 case RTE_CRYPTO_AUTH_SHA224:
730 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
731 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
733 case RTE_CRYPTO_AUTH_SHA256:
734 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
735 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
737 case RTE_CRYPTO_AUTH_SHA384:
738 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
739 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
741 case RTE_CRYPTO_AUTH_SHA512:
742 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
743 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
745 case RTE_CRYPTO_AUTH_SHA1_HMAC:
746 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
748 case RTE_CRYPTO_AUTH_SHA224_HMAC:
749 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
751 case RTE_CRYPTO_AUTH_SHA256_HMAC:
752 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
754 case RTE_CRYPTO_AUTH_SHA384_HMAC:
755 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
757 case RTE_CRYPTO_AUTH_SHA512_HMAC:
758 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
760 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
761 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
/* CMAC rides the XCBC_MAC hardware path with the aes_cmac flag set */
763 case RTE_CRYPTO_AUTH_AES_CMAC:
764 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
765 session->aes_cmac = 1;
767 case RTE_CRYPTO_AUTH_AES_GMAC:
768 if (qat_sym_validate_aes_key(auth_xform->key.length,
769 &session->qat_cipher_alg) != 0) {
770 QAT_LOG(ERR, "Invalid AES key size");
773 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
774 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
775 if (session->auth_iv.length == 0)
776 session->auth_iv.length = AES_GCM_J0_LEN;
778 session->is_iv12B = 1;
779 if (qat_dev_gen == QAT_GEN4) {
780 session->is_cnt_zero = 1;
784 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
785 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
787 case RTE_CRYPTO_AUTH_MD5_HMAC:
788 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
790 case RTE_CRYPTO_AUTH_NULL:
791 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
793 case RTE_CRYPTO_AUTH_KASUMI_F9:
794 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
796 case RTE_CRYPTO_AUTH_ZUC_EIA3:
797 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
798 QAT_LOG(ERR, "%s not supported on this device",
799 rte_crypto_auth_algorithm_strings
803 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
805 case RTE_CRYPTO_AUTH_MD5:
806 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
807 QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
811 QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
/* GMAC: build both cipher and auth descriptors, order per direction */
816 if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
817 session->is_gmac = 1;
818 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
819 session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
820 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
822 * It needs to create cipher desc content first,
823 * then authentication
825 if (qat_sym_cd_cipher_set(session,
826 auth_xform->key.data,
827 auth_xform->key.length))
830 if (qat_sym_cd_auth_set(session,
834 auth_xform->digest_length,
838 session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
839 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
841 * It needs to create authentication desc content first,
845 if (qat_sym_cd_auth_set(session,
849 auth_xform->digest_length,
853 if (qat_sym_cd_cipher_set(session,
854 auth_xform->key.data,
855 auth_xform->key.length))
/* non-GMAC path: auth-only content descriptor */
859 if (qat_sym_cd_auth_set(session,
863 auth_xform->digest_length,
/* Configure an AEAD session (GCM/CCM/Chacha-Poly): validates the key,
 * selects HW cipher/hash algorithms, may divert to single-pass mode, and
 * builds the cipher+auth content descriptors in direction-dependent order.
 * NOTE(review): extraction dropped lines here (break statements, error
 * returns, some call arguments) — comments describe only visible lines.
 */
872 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
873 struct rte_crypto_sym_xform *xform,
874 struct qat_sym_session *session)
876 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
877 enum rte_crypto_auth_operation crypto_operation;
878 struct qat_sym_dev_private *internals =
879 dev->data->dev_private;
880 enum qat_device_gen qat_dev_gen =
881 internals->qat_dev->qat_dev_gen;
884 * Store AEAD IV parameters as cipher IV,
885 * to avoid unnecessary memory usage
887 session->cipher_iv.offset = xform->aead.iv.offset;
888 session->cipher_iv.length = xform->aead.iv.length;
890 session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
891 session->is_auth = 1;
892 session->digest_length = aead_xform->digest_length;
894 session->is_single_pass = 0;
895 switch (aead_xform->algo) {
896 case RTE_CRYPTO_AEAD_AES_GCM:
897 if (qat_sym_validate_aes_key(aead_xform->key.length,
898 &session->qat_cipher_alg) != 0) {
899 QAT_LOG(ERR, "Invalid AES key size");
902 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
903 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
905 if (qat_dev_gen == QAT_GEN4)
907 if (session->cipher_iv.length == 0) {
908 session->cipher_iv.length = AES_GCM_J0_LEN;
911 session->is_iv12B = 1;
/* single-pass GCM only available from GEN3 onwards */
912 if (qat_dev_gen < QAT_GEN3)
914 qat_sym_session_handle_single_pass(session,
917 case RTE_CRYPTO_AEAD_AES_CCM:
918 if (qat_sym_validate_aes_key(aead_xform->key.length,
919 &session->qat_cipher_alg) != 0) {
920 QAT_LOG(ERR, "Invalid AES key size");
923 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
924 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
925 if (qat_dev_gen == QAT_GEN4)
928 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
929 if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
931 if (qat_dev_gen == QAT_GEN4)
933 session->qat_cipher_alg =
934 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
935 qat_sym_session_handle_single_pass(session,
939 QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
944 if (session->is_single_pass) {
945 if (qat_sym_cd_cipher_set(session,
946 aead_xform->key.data, aead_xform->key.length))
/* GCM-encrypt / CCM-decrypt: cipher descriptor first, then auth */
948 } else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
949 aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
950 (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
951 aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
952 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
954 * It needs to create cipher desc content first,
955 * then authentication
957 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
958 RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
960 if (qat_sym_cd_cipher_set(session,
961 aead_xform->key.data,
962 aead_xform->key.length))
965 if (qat_sym_cd_auth_set(session,
966 aead_xform->key.data,
967 aead_xform->key.length,
968 aead_xform->aad_length,
969 aead_xform->digest_length,
/* opposite direction: auth descriptor first, then cipher */
973 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
975 * It needs to create authentication desc content first,
979 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
980 RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
982 if (qat_sym_cd_auth_set(session,
983 aead_xform->key.data,
984 aead_xform->key.length,
985 aead_xform->aad_length,
986 aead_xform->digest_length,
990 if (qat_sym_cd_cipher_set(session,
991 aead_xform->key.data,
992 aead_xform->key.length))
999 unsigned int qat_sym_session_get_private_size(
1000 struct rte_cryptodev *dev __rte_unused)
1002 return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
1005 /* returns block size in bytes per cipher algo */
1006 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1008 switch (qat_cipher_alg) {
1009 case ICP_QAT_HW_CIPHER_ALGO_DES:
1010 return ICP_QAT_HW_DES_BLK_SZ;
1011 case ICP_QAT_HW_CIPHER_ALGO_3DES:
1012 return ICP_QAT_HW_3DES_BLK_SZ;
1013 case ICP_QAT_HW_CIPHER_ALGO_AES128:
1014 case ICP_QAT_HW_CIPHER_ALGO_AES192:
1015 case ICP_QAT_HW_CIPHER_ALGO_AES256:
1016 return ICP_QAT_HW_AES_BLK_SZ;
1018 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1025 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1026 * This is digest size rounded up to nearest quadword
1028 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1030 switch (qat_hash_alg) {
1031 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1032 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1033 QAT_HW_DEFAULT_ALIGNMENT);
1034 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1035 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1036 QAT_HW_DEFAULT_ALIGNMENT);
1037 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1038 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1039 QAT_HW_DEFAULT_ALIGNMENT);
1040 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1041 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1042 QAT_HW_DEFAULT_ALIGNMENT);
1043 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1044 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1045 QAT_HW_DEFAULT_ALIGNMENT);
1046 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1047 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1048 QAT_HW_DEFAULT_ALIGNMENT);
1049 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1050 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1051 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1052 QAT_HW_DEFAULT_ALIGNMENT);
1053 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1054 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1055 QAT_HW_DEFAULT_ALIGNMENT);
1056 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1057 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1058 QAT_HW_DEFAULT_ALIGNMENT);
1059 case ICP_QAT_HW_AUTH_ALGO_MD5:
1060 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1061 QAT_HW_DEFAULT_ALIGNMENT);
1062 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1063 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1064 QAT_HW_DEFAULT_ALIGNMENT);
1065 case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1066 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1067 QAT_HW_DEFAULT_ALIGNMENT);
1068 case ICP_QAT_HW_AUTH_ALGO_NULL:
1069 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1070 QAT_HW_DEFAULT_ALIGNMENT);
1071 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1072 /* return maximum state1 size in this case */
1073 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1074 QAT_HW_DEFAULT_ALIGNMENT);
1076 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1082 /* returns digest size in bytes per hash algo */
1083 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1085 switch (qat_hash_alg) {
1086 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1087 return ICP_QAT_HW_SHA1_STATE1_SZ;
1088 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1089 return ICP_QAT_HW_SHA224_STATE1_SZ;
1090 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1091 return ICP_QAT_HW_SHA256_STATE1_SZ;
1092 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1093 return ICP_QAT_HW_SHA384_STATE1_SZ;
1094 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1095 return ICP_QAT_HW_SHA512_STATE1_SZ;
1096 case ICP_QAT_HW_AUTH_ALGO_MD5:
1097 return ICP_QAT_HW_MD5_STATE1_SZ;
1098 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1099 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1100 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1101 /* return maximum digest size in this case */
1102 return ICP_QAT_HW_SHA512_STATE1_SZ;
1104 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1110 /* returns block size in byes per hash algo */
1111 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1113 switch (qat_hash_alg) {
1114 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1116 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1117 return SHA256_CBLOCK;
1118 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1119 return SHA256_CBLOCK;
1120 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1121 return SHA512_CBLOCK;
1122 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1123 return SHA512_CBLOCK;
1124 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1126 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1127 return ICP_QAT_HW_AES_BLK_SZ;
1128 case ICP_QAT_HW_AUTH_ALGO_MD5:
1130 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1131 /* return maximum block size in this case */
1132 return SHA512_CBLOCK;
1134 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1140 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1144 if (!SHA1_Init(&ctx))
1146 SHA1_Transform(&ctx, data_in);
1147 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1151 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1155 if (!SHA224_Init(&ctx))
1157 SHA256_Transform(&ctx, data_in);
1158 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1162 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1166 if (!SHA256_Init(&ctx))
1168 SHA256_Transform(&ctx, data_in);
1169 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1173 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1177 if (!SHA384_Init(&ctx))
1179 SHA512_Transform(&ctx, data_in);
1180 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1184 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1188 if (!SHA512_Init(&ctx))
1190 SHA512_Transform(&ctx, data_in);
1191 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1195 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1199 if (!MD5_Init(&ctx))
1201 MD5_Transform(&ctx, data_in);
1202 rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1207 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1212 uint8_t digest[qat_hash_get_digest_size(
1213 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1214 uint32_t *hash_state_out_be32;
1215 uint64_t *hash_state_out_be64;
1218 /* Initialize to avoid gcc warning */
1219 memset(digest, 0, sizeof(digest));
1221 digest_size = qat_hash_get_digest_size(hash_alg);
1222 if (digest_size <= 0)
1225 hash_state_out_be32 = (uint32_t *)data_out;
1226 hash_state_out_be64 = (uint64_t *)data_out;
1229 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1230 if (partial_hash_sha1(data_in, digest))
1232 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1233 *hash_state_out_be32 =
1234 rte_bswap32(*(((uint32_t *)digest)+i));
1236 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1237 if (partial_hash_sha224(data_in, digest))
1239 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1240 *hash_state_out_be32 =
1241 rte_bswap32(*(((uint32_t *)digest)+i));
1243 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1244 if (partial_hash_sha256(data_in, digest))
1246 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1247 *hash_state_out_be32 =
1248 rte_bswap32(*(((uint32_t *)digest)+i));
1250 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1251 if (partial_hash_sha384(data_in, digest))
1253 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1254 *hash_state_out_be64 =
1255 rte_bswap64(*(((uint64_t *)digest)+i));
1257 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1258 if (partial_hash_sha512(data_in, digest))
1260 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1261 *hash_state_out_be64 =
1262 rte_bswap64(*(((uint64_t *)digest)+i));
1264 case ICP_QAT_HW_AUTH_ALGO_MD5:
1265 if (partial_hash_md5(data_in, data_out))
1269 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
/* HMAC inner/outer pad bytes XORed with the key (see RFC 2104) */
1275 #define HMAC_IPAD_VALUE 0x36
1276 #define HMAC_OPAD_VALUE 0x5c
/* Number of AES-XCBC precompute keys (K1, K2, K3) derived below */
1277 #define HASH_XCBC_PRECOMP_KEY_NUM 3
/* All-zero seed block; AES-encrypting it yields the CMAC subkey K0 */
1279 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1281 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1285 derived[0] = base[0] << 1;
1286 for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1287 derived[i] = base[i] << 1;
1288 derived[i - 1] |= base[i] >> 7;
1292 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
/*
 * Pre-compute the authentication state material written into the QAT
 * content descriptor for the given auth algorithm:
 *  - AES-XCBC-MAC / AES-CMAC: keys derived by AES-encrypting fixed
 *    seeds (CMAC: K0 = AES-Enc(key, zero block), then K1/K2 via
 *    aes_cmac_key_derive; XCBC: K1..K3 = AES-Enc(key, 0x01../0x02../0x03..)).
 *  - GCM (GALOIS_128/64): hash key H = AES-Enc(key, all-zero block).
 *  - Otherwise (HMAC family): partial hashes of (key ^ ipad) and
 *    (key ^ opad), stored at state1 and state1 + state_len.
 * On success *p_state_len is set to the number of state bytes written.
 * NOTE(review): several error-path / cleanup lines are elided in this
 * chunk of the listing.
 */
1295 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1296 const uint8_t *auth_key,
1297 uint16_t auth_keylen,
1298 uint8_t *p_state_buf,
1299 uint16_t *p_state_len,
/* Scratch pads sized for the largest supported hash block size */
1303 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1304 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1307 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
/* AES-CMAC sub-case: derive K0, then K1/K2 subkeys */
1313 uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1316 auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1318 in = rte_zmalloc("AES CMAC K1",
1319 ICP_QAT_HW_AES_128_KEY_SZ, 16);
1322 QAT_LOG(ERR, "Failed to alloc memory");
1326 rte_memcpy(in, AES_CMAC_SEED,
1327 ICP_QAT_HW_AES_128_KEY_SZ);
/* Raw key itself occupies the start of the state buffer */
1328 rte_memcpy(p_state_buf, auth_key, auth_keylen);
1330 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1336 AES_encrypt(in, k0, &enc_key);
/* K1 and K2 are laid out immediately after state1 */
1338 k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1339 k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1341 aes_cmac_key_derive(k0, k1);
1342 aes_cmac_key_derive(k1, k2);
/* Scrub the intermediate subkey from the stack */
1344 memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1345 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
/* AES-XCBC sub-case: three constant seed blocks (RFC 3566) */
1349 static uint8_t qat_aes_xcbc_key_seed[
1350 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1351 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1352 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1353 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1354 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1355 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1356 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1360 uint8_t *out = p_state_buf;
1364 in = rte_zmalloc("working mem for key",
1365 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1367 QAT_LOG(ERR, "Failed to alloc memory");
1371 rte_memcpy(in, qat_aes_xcbc_key_seed,
1372 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
/* Encrypt each seed block with the auth key to form K1..K3 */
1373 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1374 if (AES_set_encrypt_key(auth_key,
1378 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1380 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1381 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1384 AES_encrypt(in, out, &enc_key);
1385 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1386 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1388 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
/* 'in' was advanced inside the loop; rewind to free the base pointer */
1389 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1393 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1394 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
/* GCM: H = AES-Enc(key, 0^128); len(A)/E(CTR0) fields zeroed here */
1396 uint8_t *out = p_state_buf;
1399 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1400 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1401 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1402 in = rte_zmalloc("working mem for key",
1403 ICP_QAT_HW_GALOIS_H_SZ, 16);
1405 QAT_LOG(ERR, "Failed to alloc memory");
1409 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1410 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1414 AES_encrypt(in, out, &enc_key);
1415 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1416 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1417 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
/* HMAC family from here down */
1422 block_size = qat_hash_get_block_size(hash_alg);
1425 /* init ipad and opad from key and xor with fixed values */
1426 memset(ipad, 0, block_size);
1427 memset(opad, 0, block_size);
/* Keys longer than the hash block size are rejected (not pre-hashed) */
1429 if (auth_keylen > (unsigned int)block_size) {
1430 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1433 rte_memcpy(ipad, auth_key, auth_keylen);
1434 rte_memcpy(opad, auth_key, auth_keylen);
1436 for (i = 0; i < block_size; i++) {
1437 uint8_t *ipad_ptr = ipad + i;
1438 uint8_t *opad_ptr = opad + i;
1439 *ipad_ptr ^= HMAC_IPAD_VALUE;
1440 *opad_ptr ^= HMAC_OPAD_VALUE;
1443 /* do partial hash of ipad and copy to state1 */
1444 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
/* Scrub key material before returning on the error path */
1445 memset(ipad, 0, block_size);
1446 memset(opad, 0, block_size);
1447 QAT_LOG(ERR, "ipad precompute failed");
1452 * State len is a multiple of 8, so may be larger than the digest.
1453 * Put the partial hash of opad state_len bytes after state1
1455 *p_state_len = qat_hash_get_state1_size(hash_alg);
1456 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1457 memset(ipad, 0, block_size);
1458 memset(opad, 0, block_size);
1459 QAT_LOG(ERR, "opad precompute failed");
1463 /* don't leave data lying around */
1464 memset(ipad, 0, block_size);
1465 memset(opad, 0, block_size);
/*
 * Populate the common header of the session's firmware LA bulk request
 * template from the session's command, protocol flag, slice types and
 * auth direction. Called once at session setup; per-op code reuses the
 * template unchanged.
 */
1470 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
1472 struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1473 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1474 enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
1475 uint32_t slice_flags = session->slice_types;
1478 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1479 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1480 header->service_cmd_id = session->qat_cmd;
1481 header->comn_req_flags =
1482 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1483 QAT_COMN_PTR_TYPE_FLAT);
/* Full (non-partial) operations; IV passed as 16-byte field */
1484 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1485 ICP_QAT_FW_LA_PARTIAL_NONE);
1486 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1487 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
/* Map the session protocol to the firmware protocol flag */
1489 switch (proto_flags) {
1490 case QAT_CRYPTO_PROTO_FLAG_NONE:
1491 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1492 ICP_QAT_FW_LA_NO_PROTO);
1494 case QAT_CRYPTO_PROTO_FLAG_CCM:
1495 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1496 ICP_QAT_FW_LA_CCM_PROTO);
1498 case QAT_CRYPTO_PROTO_FLAG_GCM:
1499 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1500 ICP_QAT_FW_LA_GCM_PROTO);
1502 case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1503 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1504 ICP_QAT_FW_LA_SNOW_3G_PROTO);
1506 case QAT_CRYPTO_PROTO_FLAG_ZUC:
/* ZUC uses a dedicated flag bit, not the shared proto field */
1507 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1508 ICP_QAT_FW_LA_ZUC_3G_PROTO);
1512 /* More than one of the following flags can be set at once */
1513 if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
1514 ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
1515 header->serv_specif_flags,
1516 ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
1518 if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
1519 ICP_QAT_FW_LA_SLICE_TYPE_SET(
1520 header->serv_specif_flags,
1521 ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
/* Auth direction: VERIFY compares the digest, GENERATE returns it */
1524 if (session->is_auth) {
1525 if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1526 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1527 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1528 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1529 ICP_QAT_FW_LA_CMP_AUTH_RES);
1530 } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
1531 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1532 ICP_QAT_FW_LA_RET_AUTH_RES);
1533 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1534 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
/* No auth: neither return nor compare the auth result */
1537 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1538 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1539 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1540 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
/* 12-byte IV sessions (GCM) get the dedicated IV-length flag */
1543 if (session->is_iv12B) {
1544 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1545 header->serv_specif_flags,
1546 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1549 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1550 ICP_QAT_FW_LA_NO_UPDATE_STATE);
1551 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1552 ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
/*
 * Build the cipher portion of the session content descriptor: slice
 * chaining, cipher config word, key material (with per-algorithm
 * conversion/padding) and the cd control sizes. cdesc->cd_cur_ptr is
 * advanced past everything written so the auth setup can append after it.
 */
1555 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
1556 const uint8_t *cipherkey,
1557 uint32_t cipherkeylen)
1559 struct icp_qat_hw_cipher_algo_blk *cipher;
1560 struct icp_qat_hw_cipher_algo_blk20 *cipher20;
1561 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1562 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1563 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
/* cd_ctrl is viewed through both cipher and auth control headers */
1564 void *ptr = &req_tmpl->cd_ctrl;
1565 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1566 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1567 enum icp_qat_hw_cipher_convert key_convert;
1568 struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
1569 (struct icp_qat_fw_la_cipher_20_req_params *)
1570 &cdesc->fw_req.serv_specif_rqpars;
1571 struct icp_qat_fw_la_cipher_req_params *req_cipher =
1572 (struct icp_qat_fw_la_cipher_req_params *)
1573 &cdesc->fw_req.serv_specif_rqpars;
1574 uint32_t total_key_size;
1575 uint16_t cipher_offset, cd_size;
1576 uint32_t wordIndex = 0;
1577 uint32_t *temp_key = NULL;
/* Slice chaining: cipher-only, cipher->auth, or already set by auth */
1579 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1580 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
1581 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1582 ICP_QAT_FW_SLICE_CIPHER);
1583 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1584 ICP_QAT_FW_SLICE_DRAM_WR);
1585 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1586 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1587 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1588 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1589 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1590 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1591 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
1592 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1593 ICP_QAT_FW_SLICE_CIPHER);
1594 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1595 ICP_QAT_FW_SLICE_AUTH);
1596 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1597 ICP_QAT_FW_SLICE_AUTH);
1598 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1599 ICP_QAT_FW_SLICE_DRAM_WR);
1600 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1601 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1602 QAT_LOG(ERR, "Invalid param, must be a cipher command.");
/* Decide whether hardware must convert (derive) the decrypt key */
1606 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
1608 * CTR Streaming ciphers are a special case. Decrypt = encrypt
1609 * Overriding default values previously set.
1610 * Chacha20-Poly1305 is special case, CTR but single-pass
1611 * so both direction need to be used.
1613 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
1614 if (cdesc->qat_cipher_alg ==
1615 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
1616 cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1617 cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
1619 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1620 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
1621 || cdesc->qat_cipher_alg ==
1622 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
1623 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
1624 else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
1625 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1626 else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
1627 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
1629 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
/* Per-algorithm key/state sizes and protocol flags */
1631 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
1632 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
1633 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1634 cipher_cd_ctrl->cipher_state_sz =
1635 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1636 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1638 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
1639 total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
1640 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
1641 cipher_cd_ctrl->cipher_padding_sz =
1642 (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
1643 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
1644 total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
1645 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
1646 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
1647 total_key_size = ICP_QAT_HW_DES_KEY_SZ;
1648 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
1649 } else if (cdesc->qat_cipher_alg ==
1650 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1651 total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
1652 ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1653 cipher_cd_ctrl->cipher_state_sz =
1654 ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1655 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1656 cdesc->min_qat_dev_gen = QAT_GEN2;
/* Default: AES-class ciphers use the key length as supplied */
1658 total_key_size = cipherkeylen;
1659 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
/* Offsets within the CD are expressed in 8-byte (quad-word) units */
1661 cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1662 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
1664 cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
1665 cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
1666 cipher->cipher_config.val =
1667 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
1668 cdesc->qat_cipher_alg, key_convert,
/* Write the key material; layout depends on the cipher variant */
1671 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
1672 temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
1673 sizeof(struct icp_qat_hw_cipher_config)
1675 memcpy(cipher->key, cipherkey, cipherkeylen);
1676 memcpy(temp_key, cipherkey, cipherkeylen);
1678 /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
1679 for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
1681 temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
/* KASUMI stores the key twice: plain then modifier-XORed */
1683 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
1684 cipherkeylen + cipherkeylen;
1685 } else if (cdesc->is_ucs) {
/* UCS slice (gen4) uses the wider blk20 config layout */
1686 const uint8_t *final_key = cipherkey;
1688 cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
1689 total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
1690 ICP_QAT_HW_AES_128_KEY_SZ);
1691 cipher20->cipher_config.reserved[0] = 0;
1692 cipher20->cipher_config.reserved[1] = 0;
1693 cipher20->cipher_config.reserved[2] = 0;
1695 rte_memcpy(cipher20->key, final_key, cipherkeylen);
1696 cdesc->cd_cur_ptr +=
1697 sizeof(struct icp_qat_hw_ucs_cipher_config) +
/* Legacy path: config word followed directly by the key */
1700 memcpy(cipher->key, cipherkey, cipherkeylen);
1701 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
1705 if (cdesc->is_single_pass) {
1706 QAT_FIELD_SET(cipher->cipher_config.val,
1707 cdesc->digest_length,
1708 QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
1709 QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
1710 /* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
1711 cdesc->cd.cipher.cipher_config.reserved =
1712 ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
1714 cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
/* Pad short keys up to the hardware's expected total size */
1717 if (total_key_size > cipherkeylen) {
1718 uint32_t padding_size = total_key_size-cipherkeylen;
1719 if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
1720 && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
1721 /* K3 not provided so use K1 = K3*/
1722 memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
1723 } else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
1724 && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
1725 /* K2 and K3 not provided so use K1 = K2 = K3*/
1726 memcpy(cdesc->cd_cur_ptr, cipherkey,
1728 memcpy(cdesc->cd_cur_ptr+cipherkeylen,
1729 cipherkey, cipherkeylen);
/* Otherwise zero-fill the padding region */
1731 memset(cdesc->cd_cur_ptr, 0, padding_size);
1733 cdesc->cd_cur_ptr += padding_size;
/* Mirror SPC/UCS AAD and digest sizes into the request parameters */
1735 if (cdesc->is_ucs) {
1737 * These values match in terms of position auth
1738 * slice request fields
1740 req_ucs->spc_auth_res_sz = cdesc->digest_length;
1741 if (!cdesc->is_gmac) {
1742 req_ucs->spc_aad_sz = cdesc->aad_len;
1743 req_ucs->spc_aad_offset = 0;
1745 } else if (cdesc->is_single_pass) {
1746 req_cipher->spc_aad_sz = cdesc->aad_len;
1747 req_cipher->spc_auth_res_sz = cdesc->digest_length;
/* Final CD size / key size in quad-words */
1749 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
1750 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
1751 cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
/*
 * Build the auth portion of the session content descriptor: slice
 * chaining, inner hash config, per-algorithm state1/state2 precompute
 * material (HMAC ipad/opad, GCM H, SNOW3G/ZUC key+IV, CCM key, KASUMI
 * IK||IK^KM) and the auth cd control sizes. Appends after whatever the
 * cipher setup already wrote at cdesc->cd_cur_ptr.
 */
1756 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1757 const uint8_t *authkey,
1758 uint32_t authkeylen,
1759 uint32_t aad_length,
1760 uint32_t digestsize,
1761 unsigned int operation)
1763 struct icp_qat_hw_auth_setup *hash;
1764 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1765 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1766 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
/* cd_ctrl viewed through both cipher and auth control headers */
1767 void *ptr = &req_tmpl->cd_ctrl;
1768 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1769 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1770 struct icp_qat_fw_la_auth_req_params *auth_param =
1771 (struct icp_qat_fw_la_auth_req_params *)
1772 ((char *)&req_tmpl->serv_specif_rqpars +
1773 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1774 uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1775 uint16_t hash_offset, cd_size;
1776 uint32_t *aad_len = NULL;
1777 uint32_t wordIndex = 0;
/* Slice chaining: auth-only, auth->cipher, or set earlier by cipher */
1780 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1781 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1782 ICP_QAT_FW_SLICE_AUTH);
1783 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1784 ICP_QAT_FW_SLICE_DRAM_WR);
1785 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1786 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1787 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1788 ICP_QAT_FW_SLICE_AUTH);
1789 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1790 ICP_QAT_FW_SLICE_CIPHER);
1791 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1792 ICP_QAT_FW_SLICE_CIPHER);
1793 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1794 ICP_QAT_FW_SLICE_DRAM_WR);
1795 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1796 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1797 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1801 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1802 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1804 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1807 * Setup the inner hash config
1809 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1810 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1811 hash->auth_config.reserved = 0;
1812 hash->auth_config.config =
1813 ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1814 cdesc->qat_hash_alg, digestsize);
/* Algorithms without a block counter leave it zeroed */
1816 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1817 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1818 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1819 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1820 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1821 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1822 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1823 || cdesc->is_cnt_zero
1825 hash->auth_counter.counter = 0;
1827 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
/* Counter is stored big-endian for the firmware */
1831 hash->auth_counter.counter = rte_bswap32(block_size);
1834 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1837 * cd_cur_ptr now points at the state1 information.
1839 switch (cdesc->qat_hash_alg) {
/* SHA family: MODE0 copies the FIPS initial state; otherwise HMAC
 * ipad/opad precomputes are generated into state1/state2.
 */
1840 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1841 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1843 rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1844 sizeof(sha1InitialState));
1845 state1_size = qat_hash_get_state1_size(
1846 cdesc->qat_hash_alg);
1850 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1851 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1853 QAT_LOG(ERR, "(SHA)precompute failed");
1856 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1858 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1859 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1861 rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1862 sizeof(sha224InitialState));
1863 state1_size = qat_hash_get_state1_size(
1864 cdesc->qat_hash_alg);
1868 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1869 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1871 QAT_LOG(ERR, "(SHA)precompute failed");
1874 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1876 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1877 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1879 rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1880 sizeof(sha256InitialState));
1881 state1_size = qat_hash_get_state1_size(
1882 cdesc->qat_hash_alg);
1886 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1887 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1889 QAT_LOG(ERR, "(SHA)precompute failed");
1892 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1894 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1895 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1897 rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1898 sizeof(sha384InitialState));
1899 state1_size = qat_hash_get_state1_size(
1900 cdesc->qat_hash_alg);
1904 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1905 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1907 QAT_LOG(ERR, "(SHA)precompute failed");
1910 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1912 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1913 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1915 rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1916 sizeof(sha512InitialState));
1917 state1_size = qat_hash_get_state1_size(
1918 cdesc->qat_hash_alg);
1922 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1923 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1925 QAT_LOG(ERR, "(SHA)precompute failed");
1928 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1930 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
/* Also covers AES-CMAC (flagged via cdesc->aes_cmac) */
1931 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1933 if (cdesc->aes_cmac)
1934 memset(cdesc->cd_cur_ptr, 0, state1_size);
1935 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1936 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1937 &state2_size, cdesc->aes_cmac)) {
1938 cdesc->aes_cmac ? QAT_LOG(ERR,
1939 "(CMAC)precompute failed")
1941 "(XCBC)precompute failed");
1945 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1946 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
/* GCM/GMAC: state2 holds H then the big-endian AAD length */
1947 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1948 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1949 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1950 authkeylen, cdesc->cd_cur_ptr + state1_size,
1951 &state2_size, cdesc->aes_cmac)) {
1952 QAT_LOG(ERR, "(GCM)precompute failed");
1956 * Write (the length of AAD) into bytes 16-19 of state2
1957 * in big-endian format. This field is 8 bytes
1959 auth_param->u2.aad_sz =
1960 RTE_ALIGN_CEIL(aad_length, 16);
1961 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1963 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1964 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1965 ICP_QAT_HW_GALOIS_H_SZ);
1966 *aad_len = rte_bswap32(aad_length);
1967 cdesc->aad_len = aad_length;
1969 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
/* SNOW3G: key handled via an appended cipher config block */
1970 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1971 state1_size = qat_hash_get_state1_size(
1972 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1973 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1974 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1976 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1977 (cdesc->cd_cur_ptr + state1_size + state2_size);
1978 cipherconfig->cipher_config.val =
1979 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1980 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1981 ICP_QAT_HW_CIPHER_KEY_CONVERT,
1982 ICP_QAT_HW_CIPHER_ENCRYPT);
1983 memcpy(cipherconfig->key, authkey, authkeylen);
1984 memset(cipherconfig->key + authkeylen,
1985 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1986 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1987 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1988 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1990 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
/* ZUC-EIA3: forced MODE0 config; key stored after state1 */
1991 hash->auth_config.config =
1992 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1993 cdesc->qat_hash_alg, digestsize);
1994 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1995 state1_size = qat_hash_get_state1_size(
1996 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1997 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1998 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1999 + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
2001 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2002 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
2003 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
2004 cdesc->min_qat_dev_gen = QAT_GEN2;
2007 case ICP_QAT_HW_AUTH_ALGO_MD5:
2008 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
2009 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2011 QAT_LOG(ERR, "(MD5)precompute failed");
2014 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
2016 case ICP_QAT_HW_AUTH_ALGO_NULL:
2017 state1_size = qat_hash_get_state1_size(
2018 ICP_QAT_HW_AUTH_ALGO_NULL);
2019 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
2021 case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
/* CCM: AAD size padded to include B0 and length-info blocks */
2022 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
2023 state1_size = qat_hash_get_state1_size(
2024 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
2025 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
2026 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
2028 if (aad_length > 0) {
2029 aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2030 ICP_QAT_HW_CCM_AAD_LEN_INFO;
2031 auth_param->u2.aad_sz =
2032 RTE_ALIGN_CEIL(aad_length,
2033 ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2035 auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2037 cdesc->aad_len = aad_length;
2038 hash->auth_counter.counter = 0;
2040 hash_cd_ctrl->outer_prefix_sz = digestsize;
2041 auth_param->hash_state_sz = digestsize;
2043 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2045 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2046 state1_size = qat_hash_get_state1_size(
2047 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2048 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2049 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2050 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2053 * The Inner Hash Initial State2 block must contain IK
2054 * (Initialisation Key), followed by IK XOR-ed with KM
2055 * (Key Modifier): IK||(IK^KM).
2057 /* write the auth key */
2058 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2059 /* initialise temp key with auth key */
2060 memcpy(pTempKey, authkey, authkeylen);
2061 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2062 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2063 pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2066 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2070 /* Auth CD config setup */
2071 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2072 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2073 hash_cd_ctrl->inner_res_sz = digestsize;
2074 hash_cd_ctrl->final_sz = digestsize;
2075 hash_cd_ctrl->inner_state1_sz = state1_size;
2076 auth_param->auth_res_sz = digestsize;
2078 hash_cd_ctrl->inner_state2_sz = state2_size;
/* state2 follows the auth setup + 8-byte-aligned state1 (quad words) */
2079 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2080 ((sizeof(struct icp_qat_hw_auth_setup) +
2081 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2084 cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2085 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2087 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2088 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2093 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2096 case ICP_QAT_HW_AES_128_KEY_SZ:
2097 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2099 case ICP_QAT_HW_AES_192_KEY_SZ:
2100 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2102 case ICP_QAT_HW_AES_256_KEY_SZ:
2103 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2111 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2112 enum icp_qat_hw_cipher_algo *alg)
2115 case ICP_QAT_HW_AES_128_KEY_SZ:
2116 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2118 case ICP_QAT_HW_AES_256_KEY_SZ:
2119 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2127 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2130 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2131 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2139 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2142 case ICP_QAT_HW_KASUMI_KEY_SZ:
2143 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2151 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2154 case ICP_QAT_HW_DES_KEY_SZ:
2155 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2163 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2166 case QAT_3DES_KEY_SZ_OPT1:
2167 case QAT_3DES_KEY_SZ_OPT2:
2168 case QAT_3DES_KEY_SZ_OPT3:
2169 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2177 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2180 case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2181 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2189 #ifdef RTE_LIB_SECURITY
2191 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2193 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2194 struct rte_security_docsis_xform *docsis = &conf->docsis;
2196 /* CRC generate -> Cipher encrypt */
2197 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2199 if (crypto_sym != NULL &&
2200 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2201 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2202 crypto_sym->cipher.algo ==
2203 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2204 (crypto_sym->cipher.key.length ==
2205 ICP_QAT_HW_AES_128_KEY_SZ ||
2206 crypto_sym->cipher.key.length ==
2207 ICP_QAT_HW_AES_256_KEY_SZ) &&
2208 crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2209 crypto_sym->next == NULL) {
2212 /* Cipher decrypt -> CRC verify */
2213 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2215 if (crypto_sym != NULL &&
2216 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2217 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2218 crypto_sym->cipher.algo ==
2219 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2220 (crypto_sym->cipher.key.length ==
2221 ICP_QAT_HW_AES_128_KEY_SZ ||
2222 crypto_sym->cipher.key.length ==
2223 ICP_QAT_HW_AES_256_KEY_SZ) &&
2224 crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2225 crypto_sym->next == NULL) {
/*
 * Initialise a QAT session for a DOCSIS security configuration:
 * validate the config, record the session's IOVA-based CD address,
 * then configure the (cipher-only) session and finalize it.
 * NOTE(review): the specific error-return lines are elided in this
 * chunk of the listing.
 */
2234 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2235 struct rte_security_session_conf *conf, void *session_private)
2239 struct rte_crypto_sym_xform *xform = NULL;
2240 struct qat_sym_session *session = session_private;
2242 /* Clear the session */
2243 memset(session, 0, qat_sym_session_get_private_size(dev));
2245 ret = qat_sec_session_check_docsis(conf);
2247 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2251 xform = conf->crypto_xform;
2253 /* Verify the session physical address is known */
2254 rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2255 if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2257 "Session physical address unknown. Bad memory pool.");
2261 /* Set context descriptor physical address */
2262 session->cd_paddr = session_paddr +
2263 offsetof(struct qat_sym_session, cd);
2265 session->min_qat_dev_gen = QAT_GEN1;
2267 /* Get requested QAT command id - should be cipher */
2268 qat_cmd_id = qat_get_cmd_id(xform);
2269 if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2270 QAT_LOG(ERR, "Unsupported xform chain requested");
2273 session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2275 ret = qat_sym_session_configure_cipher(dev, xform, session);
/* Builds the fw request template header once all params are set */
2278 qat_sym_session_finalize(session);
/*
 * rte_security session create op: accepts only lookaside-protocol
 * DOCSIS configs, takes a private-data object from the mempool,
 * configures it, and attaches it to the security session. The object
 * is returned to the mempool on configuration failure.
 */
2284 qat_security_session_create(void *dev,
2285 struct rte_security_session_conf *conf,
2286 struct rte_security_session *sess,
2287 struct rte_mempool *mempool)
2289 void *sess_private_data;
2290 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2293 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2294 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2295 QAT_LOG(ERR, "Invalid security protocol");
2299 if (rte_mempool_get(mempool, &sess_private_data)) {
2300 QAT_LOG(ERR, "Couldn't get object from session mempool");
2304 ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2307 QAT_LOG(ERR, "Failed to configure session parameters");
2308 /* Return session to mempool */
2309 rte_mempool_put(mempool, sess_private_data);
2313 set_sec_session_private_data(sess, sess_private_data);
2319 qat_security_session_destroy(void *dev __rte_unused,
2320 struct rte_security_session *sess)
2322 void *sess_priv = get_sec_session_private_data(sess);
2323 struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2327 bpi_cipher_ctx_free(s->bpi_ctx);
2328 memset(s, 0, qat_sym_session_get_private_size(dev));
2329 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2331 set_sec_session_private_data(sess, NULL);
2332 rte_mempool_put(sess_mp, sess_priv);