1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2 * Copyright(c) 2015-2019 Intel Corporation
5 #include <openssl/sha.h> /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h> /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h> /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h> /* Needed for bpi runt block processing */
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
/* SHA-1 initial digest state H0..H4 (big-endian bytes), 20 bytes.
 * Values per FIPS 180-2, section 5.3.1.
 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01,
	0xef, 0xcd, 0xab, 0x89,
	0x98, 0xba, 0xdc, 0xfe,
	0x10, 0x32, 0x54, 0x76,
	0xc3, 0xd2, 0xe1, 0xf0,
};
/* SHA-224 initial digest state H0..H7 (big-endian bytes), 32 bytes.
 * Values per FIPS 180-2 (change notice), section 5.3.
 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8,
	0x36, 0x7c, 0xd5, 0x07,
	0x30, 0x70, 0xdd, 0x17,
	0xf7, 0x0e, 0x59, 0x39,
	0xff, 0xc0, 0x0b, 0x31,
	0x68, 0x58, 0x15, 0x11,
	0x64, 0xf9, 0x8f, 0xa7,
	0xbe, 0xfa, 0x4f, 0xa4,
};
/* SHA-256 initial digest state H0..H7 (big-endian bytes), 32 bytes.
 * Values per FIPS 180-2, section 5.3.2.
 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67,
	0xbb, 0x67, 0xae, 0x85,
	0x3c, 0x6e, 0xf3, 0x72,
	0xa5, 0x4f, 0xf5, 0x3a,
	0x51, 0x0e, 0x52, 0x7f,
	0x9b, 0x05, 0x68, 0x8c,
	0x1f, 0x83, 0xd9, 0xab,
	0x5b, 0xe0, 0xcd, 0x19,
};
/* SHA-384 initial digest state H0..H7 (big-endian bytes), 64 bytes.
 * Values per FIPS 180-2, section 5.3.3.
 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
	0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
	0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
	0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
	0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
	0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
	0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
	0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4,
};
/* SHA-512 initial digest state H0..H7 (big-endian bytes), 64 bytes.
 * Values per FIPS 180-2, section 5.3.4.
 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
	0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
	0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
	0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
	0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
	0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
	0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
	0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79,
};
/* Forward declarations for the content-descriptor (cd) builders and the
 * common-header initializer defined later in this file.
 * NOTE(review): return types and some parameter lines are elided in this
 * excerpt of the file. */
61 qat_sym_cd_cipher_set(struct qat_sym_session *cd,
62 const uint8_t *enckey,
66 qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
67 const uint8_t *authkey,
71 unsigned int operation);
73 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
75 /* Req/cd init functions */
/* Finalize a configured session by (re)writing the common firmware
 * request header from the session's accumulated settings.
 * NOTE(review): braces/return-type line are elided in this excerpt. */
78 qat_sym_session_finalize(struct qat_sym_session *session)
80 qat_sym_session_init_common_hdr(session);
83 /** Frees a context previously created
84 * Depends on openssl libcrypto
87 bpi_cipher_ctx_free(void *bpi_ctx)
/* Release the OpenSSL EVP context created by bpi_cipher_ctx_init().
 * NOTE(review): a NULL guard likely precedes this call but is elided
 * from this excerpt — confirm against the full file. */
90 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
93 /** Creates a context in either AES or DES in ECB mode
94 * Depends on openssl libcrypto
97 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
98 enum rte_crypto_cipher_operation direction __rte_unused,
99 const uint8_t *key, uint16_t key_length, void **ctx)
101 const EVP_CIPHER *algo = NULL;
/* Allocate the EVP context; the selected algorithm is bound below. */
103 *ctx = EVP_CIPHER_CTX_new();
/* DES DOCSIS BPI uses single-DES ECB; otherwise pick an AES-ECB
 * variant by key length (128 vs 256). */
110 if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
111 algo = EVP_des_ecb();
113 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
114 algo = EVP_aes_128_ecb();
116 algo = EVP_aes_256_ecb();
118 /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
119 if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
/* Error path: free the context allocated above.
 * NOTE(review): intermediate error-handling lines are elided here. */
128 EVP_CIPHER_CTX_free(*ctx);
/* Scan the device's capability table for a symmetric CIPHER capability
 * matching `algo`. The table is terminated by an entry whose op is
 * RTE_CRYPTO_OP_TYPE_UNDEFINED.
 * NOTE(review): the return statements are elided in this excerpt;
 * presumably returns nonzero when a match is found. */
133 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
134 struct qat_cryptodev_private *internals)
137 const struct rte_cryptodev_capabilities *capability;
139 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
140 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
141 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
144 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
147 if (capability->sym.cipher.algo == algo)
/* Same capability-table scan as qat_is_cipher_alg_supported(), but for
 * symmetric AUTH capabilities matching `algo`.
 * NOTE(review): return statements are elided in this excerpt. */
154 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
155 struct qat_cryptodev_private *internals)
158 const struct rte_cryptodev_capabilities *capability;
160 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
161 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
162 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
165 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
168 if (capability->sym.auth.algo == algo)
/* Tear down the PMD-private portion of a symmetric session: free the
 * OpenSSL BPI context (if present), zero the private area, detach it
 * from the generic session and return it to its mempool.
 * NOTE(review): braces and a likely non-NULL guard around the body are
 * elided in this excerpt. */
175 qat_sym_session_clear(struct rte_cryptodev *dev,
176 struct rte_cryptodev_sym_session *sess)
178 uint8_t index = dev->driver_id;
179 void *sess_priv = get_sym_session_private_data(sess, index);
180 struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
184 bpi_cipher_ctx_free(s->bpi_ctx);
/* Wipe before returning to the pool so stale keys are not leaked to
 * the next user of this mempool object. */
185 memset(s, 0, qat_sym_session_get_private_size(dev));
186 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
188 set_sym_session_private_data(sess, index, NULL);
189 rte_mempool_put(sess_mp, sess_priv);
/* Map a crypto xform chain onto a QAT firmware LA command id:
 * cipher-only, auth-only, AEAD (direction decides cipher/hash order),
 * or a two-element cipher+auth chain.
 * NOTE(review): returns for the error/default paths are elided in this
 * excerpt. */
194 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
197 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
198 return ICP_QAT_FW_LA_CMD_CIPHER;
200 /* Authentication Only */
201 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
202 return ICP_QAT_FW_LA_CMD_AUTH;
205 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
206 /* AES-GCM and AES-CCM works with different direction
207 * GCM first encrypts and generate hash where AES-CCM
208 * first generate hash and encrypts. Similar relation
209 * applies to decryption.
211 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
212 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
213 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
215 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
/* Decrypt direction: order of cipher/hash is reversed vs encrypt. */
217 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
218 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
220 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
223 if (xform->next == NULL)
226 /* Cipher then Authenticate */
227 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
228 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
229 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
231 /* Authenticate then Cipher */
232 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
233 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
239 static struct rte_crypto_auth_xform *
/* Locate the AUTH xform in a (possibly chained) xform list.
 * NOTE(review): the chain-walk / return lines are elided here. */
240 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
243 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
252 static struct rte_crypto_cipher_xform *
/* Locate the CIPHER xform in a (possibly chained) xform list.
 * NOTE(review): the chain-walk for the non-first element is elided. */
253 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
256 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
257 return &xform->cipher;
/* Configure the cipher half of a session from the xform chain:
 * record the IV offset/length, validate the key for the selected
 * algorithm, set session->qat_cipher_alg / qat_mode / qat_dir, and
 * build the cipher content descriptor via qat_sym_cd_cipher_set().
 * DOCSIS BPI algorithms additionally allocate an OpenSSL ECB context
 * for runt-block processing; it is freed on the error path below.
 * NOTE(review): `break` statements, error `goto`s/returns and closing
 * braces are elided throughout this excerpt. */
266 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
267 struct rte_crypto_sym_xform *xform,
268 struct qat_sym_session *session)
270 struct qat_cryptodev_private *internals = dev->data->dev_private;
271 struct rte_crypto_cipher_xform *cipher_xform = NULL;
272 enum qat_device_gen qat_dev_gen =
273 internals->qat_dev->qat_dev_gen;
276 /* Get cipher xform from crypto xform chain */
277 cipher_xform = qat_get_cipher_xform(xform);
279 session->cipher_iv.offset = cipher_xform->iv.offset;
280 session->cipher_iv.length = cipher_xform->iv.length;
282 switch (cipher_xform->algo) {
283 case RTE_CRYPTO_CIPHER_AES_CBC:
284 if (qat_sym_validate_aes_key(cipher_xform->key.length,
285 &session->qat_cipher_alg) != 0) {
286 QAT_LOG(ERR, "Invalid AES cipher key size");
290 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
292 case RTE_CRYPTO_CIPHER_AES_CTR:
293 if (qat_sym_validate_aes_key(cipher_xform->key.length,
294 &session->qat_cipher_alg) != 0) {
295 QAT_LOG(ERR, "Invalid AES cipher key size");
299 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
/* GEN4 gets special handling for CTR mode (body elided here). */
300 if (qat_dev_gen == QAT_GEN4)
303 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
304 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
305 &session->qat_cipher_alg) != 0) {
306 QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
310 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
312 case RTE_CRYPTO_CIPHER_NULL:
313 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
314 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
316 case RTE_CRYPTO_CIPHER_KASUMI_F8:
317 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
318 &session->qat_cipher_alg) != 0) {
319 QAT_LOG(ERR, "Invalid KASUMI cipher key size");
323 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
325 case RTE_CRYPTO_CIPHER_3DES_CBC:
326 if (qat_sym_validate_3des_key(cipher_xform->key.length,
327 &session->qat_cipher_alg) != 0) {
328 QAT_LOG(ERR, "Invalid 3DES cipher key size");
332 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
334 case RTE_CRYPTO_CIPHER_DES_CBC:
335 if (qat_sym_validate_des_key(cipher_xform->key.length,
336 &session->qat_cipher_alg) != 0) {
337 QAT_LOG(ERR, "Invalid DES cipher key size");
341 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
343 case RTE_CRYPTO_CIPHER_3DES_CTR:
344 if (qat_sym_validate_3des_key(cipher_xform->key.length,
345 &session->qat_cipher_alg) != 0) {
346 QAT_LOG(ERR, "Invalid 3DES cipher key size");
350 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
/* DOCSIS BPI variants: create an OpenSSL ECB context for the runt
 * (partial final) block before normal key validation. */
352 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
353 ret = bpi_cipher_ctx_init(
356 cipher_xform->key.data,
357 cipher_xform->key.length,
360 QAT_LOG(ERR, "failed to create DES BPI ctx");
363 if (qat_sym_validate_des_key(cipher_xform->key.length,
364 &session->qat_cipher_alg) != 0) {
365 QAT_LOG(ERR, "Invalid DES cipher key size");
369 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
371 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
372 ret = bpi_cipher_ctx_init(
375 cipher_xform->key.data,
376 cipher_xform->key.length,
379 QAT_LOG(ERR, "failed to create AES BPI ctx");
382 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
383 &session->qat_cipher_alg) != 0) {
384 QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
388 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
/* ZUC is capability-gated: not all device generations support it. */
390 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
391 if (!qat_is_cipher_alg_supported(
392 cipher_xform->algo, internals)) {
393 QAT_LOG(ERR, "%s not supported on this device",
394 rte_crypto_cipher_algorithm_strings
395 [cipher_xform->algo]);
399 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
400 &session->qat_cipher_alg) != 0) {
401 QAT_LOG(ERR, "Invalid ZUC cipher key size");
405 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
/* XTS keys are twice the AES key size; AES-192-XTS is rejected. */
407 case RTE_CRYPTO_CIPHER_AES_XTS:
408 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
409 QAT_LOG(ERR, "AES-XTS-192 not supported");
413 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
414 &session->qat_cipher_alg) != 0) {
415 QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
419 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
421 case RTE_CRYPTO_CIPHER_3DES_ECB:
422 case RTE_CRYPTO_CIPHER_AES_ECB:
423 case RTE_CRYPTO_CIPHER_AES_F8:
424 case RTE_CRYPTO_CIPHER_ARC4:
425 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
430 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
436 if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
437 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
439 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
/* Build the hardware cipher content descriptor from the key. */
441 if (qat_sym_cd_cipher_set(session,
442 cipher_xform->key.data,
443 cipher_xform->key.length)) {
/* Error cleanup: drop any BPI context allocated above. */
451 if (session->bpi_ctx) {
452 bpi_cipher_ctx_free(session->bpi_ctx);
453 session->bpi_ctx = NULL;
/* rte_cryptodev session-configure entry point: take a private-data
 * object from the mempool, fill it via
 * qat_sym_session_set_parameters(), and attach it to the generic
 * session. On parameter failure the object is returned to the pool.
 * NOTE(review): braces and return statements are elided here. */
459 qat_sym_session_configure(struct rte_cryptodev *dev,
460 struct rte_crypto_sym_xform *xform,
461 struct rte_cryptodev_sym_session *sess,
462 struct rte_mempool *mempool)
464 void *sess_private_data;
467 if (rte_mempool_get(mempool, &sess_private_data)) {
469 "Couldn't get object from session mempool");
473 ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
476 "Crypto QAT PMD: failed to configure session parameters");
478 /* Return session to mempool */
479 rte_mempool_put(mempool, sess_private_data);
483 set_sym_session_private_data(sess, dev->driver_id,
/* Enable extended-protocol hash flags in the firmware request template
 * (used for "mixed" wireless cipher/hash combinations): sets the
 * extended-flags bit in LW1, ORs `hash_flag` into the cd_ctrl hash
 * flags (LW28), then fixes up the SNOW3G/ZUC proto flags in LW1
 * according to the session's cipher algorithm. */
490 qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
493 struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
494 struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
495 (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
496 session->fw_req.cd_ctrl.content_desc_ctrl_lw;
498 /* Set the Use Extended Protocol Flags bit in LW 1 */
499 QAT_FIELD_SET(header->comn_req_flags,
500 QAT_COMN_EXT_FLAGS_USED,
501 QAT_COMN_EXT_FLAGS_BITPOS,
502 QAT_COMN_EXT_FLAGS_MASK);
504 /* Set Hash Flags in LW 28 */
505 cd_ctrl->hash_flags |= hash_flag;
507 /* Set proto flags in LW 1 */
508 switch (session->qat_cipher_alg) {
509 case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
510 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
511 ICP_QAT_FW_LA_SNOW_3G_PROTO);
512 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
513 header->serv_specif_flags, 0);
515 case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
516 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
517 ICP_QAT_FW_LA_NO_PROTO);
518 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
519 header->serv_specif_flags,
520 ICP_QAT_FW_LA_ZUC_3G_PROTO);
/* Default: neither SNOW3G nor ZUC proto flag is used. */
523 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
524 ICP_QAT_FW_LA_NO_PROTO);
525 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
526 header->serv_specif_flags, 0);
/* Special-case "mixed" hash+cipher sessions (e.g. ZUC auth with a
 * non-ZUC cipher, SNOW3G auth with a non-SNOW3G cipher, or CMAC/NULL
 * auth with a wireless cipher): bump the minimum device generation and
 * switch the request template to extended hash flags. Devices that
 * advertise QAT_SYM_CAP_MIXED_CRYPTO only need GEN2, otherwise GEN3. */
532 qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
533 struct qat_sym_session *session)
535 const struct qat_cryptodev_private *qat_private =
536 dev->data->dev_private;
537 enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
538 QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
540 if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
541 session->qat_cipher_alg !=
542 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
543 session->min_qat_dev_gen = min_dev_gen;
544 qat_sym_session_set_ext_hash_flags(session,
545 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
546 } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
547 session->qat_cipher_alg !=
548 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
549 session->min_qat_dev_gen = min_dev_gen;
550 qat_sym_session_set_ext_hash_flags(session,
551 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
552 } else if ((session->aes_cmac ||
553 session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
554 (session->qat_cipher_alg ==
555 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
556 session->qat_cipher_alg ==
557 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
558 session->min_qat_dev_gen = min_dev_gen;
559 qat_sym_session_set_ext_hash_flags(session, 0);
/* Populate a qat_sym_session from the xform chain: verify the
 * session's IOVA, zero and initialise the session, translate the chain
 * into a firmware command id, dispatch to the cipher/auth/aead
 * configure helpers in the order the command requires, then finalize
 * the request header and apply mixed-algorithm fix-ups.
 * NOTE(review): `break`s, error returns and closing braces are elided
 * in this excerpt. */
564 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
565 struct rte_crypto_sym_xform *xform, void *session_private)
567 struct qat_sym_session *session = session_private;
568 struct qat_cryptodev_private *internals = dev->data->dev_private;
569 enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
572 int handle_mixed = 0;
574 /* Verify the session physical address is known */
575 rte_iova_t session_paddr = rte_mempool_virt2iova(session);
576 if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
578 "Session physical address unknown. Bad memory pool.");
582 memset(session, 0, sizeof(*session));
583 /* Set context descriptor physical address */
584 session->cd_paddr = session_paddr +
585 offsetof(struct qat_sym_session, cd);
587 session->min_qat_dev_gen = QAT_GEN1;
588 session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
591 /* Get requested QAT command id */
592 qat_cmd_id = qat_get_cmd_id(xform);
593 if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
594 QAT_LOG(ERR, "Unsupported xform chain requested");
597 session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
598 switch (session->qat_cmd) {
599 case ICP_QAT_FW_LA_CMD_CIPHER:
600 ret = qat_sym_session_configure_cipher(dev, xform, session);
604 case ICP_QAT_FW_LA_CMD_AUTH:
605 ret = qat_sym_session_configure_auth(dev, xform, session);
/* Single-pass GMAC: GEN3 + AES-GMAC + 12-byte IV. */
608 session->is_single_pass_gmac =
609 qat_dev_gen == QAT_GEN3 &&
610 xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
611 xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
613 case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
614 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
615 ret = qat_sym_session_configure_aead(dev, xform,
/* Chained cipher-then-auth: configure cipher first, auth second. */
620 ret = qat_sym_session_configure_cipher(dev,
624 ret = qat_sym_session_configure_auth(dev,
631 case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
632 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
633 ret = qat_sym_session_configure_aead(dev, xform,
/* Chained auth-then-cipher: configure auth first, cipher second. */
638 ret = qat_sym_session_configure_auth(dev,
642 ret = qat_sym_session_configure_cipher(dev,
649 case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
650 case ICP_QAT_FW_LA_CMD_TRNG_TEST:
651 case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
652 case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
653 case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
654 case ICP_QAT_FW_LA_CMD_MGF1:
655 case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
656 case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
657 case ICP_QAT_FW_LA_CMD_DELIMITER:
658 QAT_LOG(ERR, "Unsupported Service %u",
662 QAT_LOG(ERR, "Unsupported Service %u",
666 qat_sym_session_finalize(session);
668 /* Special handling of mixed hash+cipher algorithms */
669 qat_sym_session_handle_mixed(dev, session);
/* Convert an AEAD session into single-pass mode (GEN3+): one CIPHER
 * firmware command does both encryption and authentication. AES-GCM
 * uses the dedicated AEAD mode; Chacha20-Poly1305 runs through QAT CTR
 * mode instead. AEAD IV/AAD/digest parameters are copied onto the
 * session and direction selects encrypt+generate vs decrypt+verify. */
676 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
677 const struct rte_crypto_aead_xform *aead_xform)
679 session->is_single_pass = 1;
680 session->is_auth = 1;
681 session->min_qat_dev_gen = QAT_GEN3;
682 session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
683 /* Chacha-Poly is special case that use QAT CTR mode */
684 if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
685 session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
687 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
689 session->cipher_iv.offset = aead_xform->iv.offset;
690 session->cipher_iv.length = aead_xform->iv.length;
691 session->aad_len = aead_xform->aad_length;
692 session->digest_length = aead_xform->digest_length;
694 if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
695 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
696 session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
698 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
699 session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
/* Configure the auth half of a session: select the QAT hash algorithm
 * and mode from the rte auth algo, record key/IV/digest parameters,
 * then build the auth content descriptor. AES-GMAC is special: it is
 * implemented as a GCM cipher+hash pair, so both cipher and auth
 * content descriptors are built, in an order that depends on
 * generate-vs-verify.
 * NOTE(review): `break`s, error returns and closing braces are elided
 * throughout this excerpt. */
706 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
707 struct rte_crypto_sym_xform *xform,
708 struct qat_sym_session *session)
710 struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
711 struct qat_cryptodev_private *internals = dev->data->dev_private;
712 const uint8_t *key_data = auth_xform->key.data;
713 uint8_t key_length = auth_xform->key.length;
714 enum qat_device_gen qat_dev_gen =
715 internals->qat_dev->qat_dev_gen;
717 session->aes_cmac = 0;
718 session->auth_key_length = auth_xform->key.length;
719 session->auth_iv.offset = auth_xform->iv.offset;
720 session->auth_iv.length = auth_xform->iv.length;
721 session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
722 session->is_auth = 1;
723 session->digest_length = auth_xform->digest_length;
725 switch (auth_xform->algo) {
/* Plain (non-HMAC) SHA digests use auth MODE0. */
726 case RTE_CRYPTO_AUTH_SHA1:
727 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
728 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
730 case RTE_CRYPTO_AUTH_SHA224:
731 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
732 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
734 case RTE_CRYPTO_AUTH_SHA256:
735 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
736 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
738 case RTE_CRYPTO_AUTH_SHA384:
739 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
740 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
742 case RTE_CRYPTO_AUTH_SHA512:
743 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
744 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
/* HMAC variants keep the default MODE1 set above. */
746 case RTE_CRYPTO_AUTH_SHA1_HMAC:
747 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
749 case RTE_CRYPTO_AUTH_SHA224_HMAC:
750 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
752 case RTE_CRYPTO_AUTH_SHA256_HMAC:
753 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
755 case RTE_CRYPTO_AUTH_SHA384_HMAC:
756 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
758 case RTE_CRYPTO_AUTH_SHA512_HMAC:
759 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
761 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
762 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
/* CMAC reuses the XCBC_MAC hardware algo with aes_cmac flagged. */
764 case RTE_CRYPTO_AUTH_AES_CMAC:
765 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
766 session->aes_cmac = 1;
768 case RTE_CRYPTO_AUTH_AES_GMAC:
769 if (qat_sym_validate_aes_key(auth_xform->key.length,
770 &session->qat_cipher_alg) != 0) {
771 QAT_LOG(ERR, "Invalid AES key size");
774 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
775 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
/* No IV length given: fall back to the full J0 block. */
776 if (session->auth_iv.length == 0)
777 session->auth_iv.length = AES_GCM_J0_LEN;
779 session->is_iv12B = 1;
780 if (qat_dev_gen == QAT_GEN4) {
781 session->is_cnt_zero = 1;
785 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
786 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
788 case RTE_CRYPTO_AUTH_MD5_HMAC:
789 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
791 case RTE_CRYPTO_AUTH_NULL:
792 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
794 case RTE_CRYPTO_AUTH_KASUMI_F9:
795 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
797 case RTE_CRYPTO_AUTH_ZUC_EIA3:
798 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
799 QAT_LOG(ERR, "%s not supported on this device",
800 rte_crypto_auth_algorithm_strings
804 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
806 case RTE_CRYPTO_AUTH_MD5:
807 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
808 QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
812 QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
/* GMAC: build BOTH descriptors; order depends on generate/verify. */
817 if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
818 session->is_gmac = 1;
819 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
820 session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
821 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
823 * It needs to create cipher desc content first,
824 * then authentication
826 if (qat_sym_cd_cipher_set(session,
827 auth_xform->key.data,
828 auth_xform->key.length))
831 if (qat_sym_cd_auth_set(session,
835 auth_xform->digest_length,
/* Verify direction: hash-then-cipher, auth descriptor first. */
839 session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
840 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
842 * It needs to create authentication desc content first,
846 if (qat_sym_cd_auth_set(session,
850 auth_xform->digest_length,
854 if (qat_sym_cd_cipher_set(session,
855 auth_xform->key.data,
856 auth_xform->key.length))
/* Non-GMAC algorithms: only the auth descriptor is needed. */
860 if (qat_sym_cd_auth_set(session,
864 auth_xform->digest_length,
/* Configure a session from an AEAD xform (GCM / CCM /
 * Chacha20-Poly1305): validate the key, select QAT cipher/hash algos,
 * optionally switch to single-pass mode on capable devices, and build
 * the cipher and auth content descriptors in the order implied by the
 * algorithm and direction (see qat_get_cmd_id() for the rationale).
 * NOTE(review): `break`s, error returns and closing braces are elided
 * throughout this excerpt. */
873 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
874 struct rte_crypto_sym_xform *xform,
875 struct qat_sym_session *session)
877 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
878 enum rte_crypto_auth_operation crypto_operation;
879 struct qat_cryptodev_private *internals =
880 dev->data->dev_private;
881 enum qat_device_gen qat_dev_gen =
882 internals->qat_dev->qat_dev_gen;
885 * Store AEAD IV parameters as cipher IV,
886 * to avoid unnecessary memory usage
888 session->cipher_iv.offset = xform->aead.iv.offset;
889 session->cipher_iv.length = xform->aead.iv.length;
891 session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
892 session->is_auth = 1;
893 session->digest_length = aead_xform->digest_length;
895 session->is_single_pass = 0;
896 switch (aead_xform->algo) {
897 case RTE_CRYPTO_AEAD_AES_GCM:
898 if (qat_sym_validate_aes_key(aead_xform->key.length,
899 &session->qat_cipher_alg) != 0) {
900 QAT_LOG(ERR, "Invalid AES key size");
903 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
904 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
906 if (qat_dev_gen == QAT_GEN4)
/* Missing IV length defaults to the full J0 block. */
908 if (session->cipher_iv.length == 0) {
909 session->cipher_iv.length = AES_GCM_J0_LEN;
912 session->is_iv12B = 1;
/* GEN3+ can run GCM single-pass (handled below). */
913 if (qat_dev_gen < QAT_GEN3)
915 qat_sym_session_handle_single_pass(session,
918 case RTE_CRYPTO_AEAD_AES_CCM:
919 if (qat_sym_validate_aes_key(aead_xform->key.length,
920 &session->qat_cipher_alg) != 0) {
921 QAT_LOG(ERR, "Invalid AES key size");
924 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
925 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
926 if (qat_dev_gen == QAT_GEN4)
/* Chacha20-Poly1305 is always single-pass on QAT. */
929 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
930 if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
932 if (qat_dev_gen == QAT_GEN4)
934 session->qat_cipher_alg =
935 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
936 qat_sym_session_handle_single_pass(session,
940 QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
/* Single-pass sessions need only the cipher descriptor. */
945 if (session->is_single_pass) {
946 if (qat_sym_cd_cipher_set(session,
947 aead_xform->key.data, aead_xform->key.length))
/* Two-pass: GCM-encrypt and CCM-decrypt run cipher first. */
949 } else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
950 aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
951 (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
952 aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
953 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
955 * It needs to create cipher desc content first,
956 * then authentication
958 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
959 RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
961 if (qat_sym_cd_cipher_set(session,
962 aead_xform->key.data,
963 aead_xform->key.length))
966 if (qat_sym_cd_auth_set(session,
967 aead_xform->key.data,
968 aead_xform->key.length,
969 aead_xform->aad_length,
970 aead_xform->digest_length,
/* Opposite pairing: auth descriptor first, then cipher. */
974 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
976 * It needs to create authentication desc content first,
980 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
981 RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
983 if (qat_sym_cd_auth_set(session,
984 aead_xform->key.data,
985 aead_xform->key.length,
986 aead_xform->aad_length,
987 aead_xform->digest_length,
991 if (qat_sym_cd_cipher_set(session,
992 aead_xform->key.data,
993 aead_xform->key.length))
/* Size of the PMD-private session data, rounded up to 8 bytes. */
1000 unsigned int qat_sym_session_get_private_size(
1001 struct rte_cryptodev *dev __rte_unused)
1003 return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
1006 /* returns block size in bytes per cipher algo */
1007 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1009 switch (qat_cipher_alg) {
1010 case ICP_QAT_HW_CIPHER_ALGO_DES:
1011 return ICP_QAT_HW_DES_BLK_SZ;
1012 case ICP_QAT_HW_CIPHER_ALGO_3DES:
1013 return ICP_QAT_HW_3DES_BLK_SZ;
1014 case ICP_QAT_HW_CIPHER_ALGO_AES128:
1015 case ICP_QAT_HW_CIPHER_ALGO_AES192:
1016 case ICP_QAT_HW_CIPHER_ALGO_AES256:
1017 return ICP_QAT_HW_AES_BLK_SZ;
/* Default case: unknown algorithm is logged (error return elided). */
1019 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1026 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1027 * This is digest size rounded up to nearest quadword
1029 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1031 switch (qat_hash_alg) {
1032 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1033 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1034 QAT_HW_DEFAULT_ALIGNMENT);
1035 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1036 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1037 QAT_HW_DEFAULT_ALIGNMENT);
1038 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1039 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1040 QAT_HW_DEFAULT_ALIGNMENT);
1041 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1042 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1043 QAT_HW_DEFAULT_ALIGNMENT);
1044 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1045 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1046 QAT_HW_DEFAULT_ALIGNMENT);
1047 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1048 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1049 QAT_HW_DEFAULT_ALIGNMENT);
1050 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1051 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1052 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1053 QAT_HW_DEFAULT_ALIGNMENT);
1054 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1055 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1056 QAT_HW_DEFAULT_ALIGNMENT);
1057 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1058 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1059 QAT_HW_DEFAULT_ALIGNMENT);
1060 case ICP_QAT_HW_AUTH_ALGO_MD5:
1061 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1062 QAT_HW_DEFAULT_ALIGNMENT);
1063 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1064 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1065 QAT_HW_DEFAULT_ALIGNMENT);
1066 case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1067 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1068 QAT_HW_DEFAULT_ALIGNMENT);
1069 case ICP_QAT_HW_AUTH_ALGO_NULL:
1070 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1071 QAT_HW_DEFAULT_ALIGNMENT);
1072 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1073 /* return maximum state1 size in this case */
1074 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1075 QAT_HW_DEFAULT_ALIGNMENT);
/* Default case: unknown algorithm is logged (error return elided). */
1077 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1083 /* returns digest size in bytes per hash algo */
1084 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1086 switch (qat_hash_alg) {
1087 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1088 return ICP_QAT_HW_SHA1_STATE1_SZ;
1089 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1090 return ICP_QAT_HW_SHA224_STATE1_SZ;
1091 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1092 return ICP_QAT_HW_SHA256_STATE1_SZ;
1093 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1094 return ICP_QAT_HW_SHA384_STATE1_SZ;
1095 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1096 return ICP_QAT_HW_SHA512_STATE1_SZ;
1097 case ICP_QAT_HW_AUTH_ALGO_MD5:
1098 return ICP_QAT_HW_MD5_STATE1_SZ;
1099 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1100 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1101 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1102 /* return maximum digest size in this case */
1103 return ICP_QAT_HW_SHA512_STATE1_SZ;
/* Default case: unknown algorithm is logged (error return elided). */
1105 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1111 /* returns block size in byes per hash algo */
1112 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1114 switch (qat_hash_alg) {
1115 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1117 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1118 return SHA256_CBLOCK;
1119 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1120 return SHA256_CBLOCK;
1121 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1122 return SHA512_CBLOCK;
1123 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1124 return SHA512_CBLOCK;
1125 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1127 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1128 return ICP_QAT_HW_AES_BLK_SZ;
1129 case ICP_QAT_HW_AUTH_ALGO_MD5:
1131 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1132 /* return maximum block size in this case */
1133 return SHA512_CBLOCK;
/* Default case: unknown algorithm is logged (error return elided).
 * NOTE(review): returns for SHA1/GALOIS_128/MD5 cases are elided from
 * this excerpt. */
1135 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
/* Run one SHA-1 compression round over a single input block and copy
 * the raw (host-order) internal state out — used for HMAC ipad/opad
 * precomputes. NOTE(review): ctx declaration/returns are elided. */
1141 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1145 if (!SHA1_Init(&ctx))
1147 SHA1_Transform(&ctx, data_in);
1148 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
/* One SHA-224 compression round over a single block; copies the full
 * SHA-256-sized internal state (SHA-224 shares the SHA-256 engine). */
1152 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1156 if (!SHA224_Init(&ctx))
1158 SHA256_Transform(&ctx, data_in);
1159 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
/* One SHA-256 compression round over a single block; copies the raw
 * internal state for precompute use. */
1163 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1167 if (!SHA256_Init(&ctx))
1169 SHA256_Transform(&ctx, data_in);
1170 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
/* One SHA-384 compression round over a single block; copies the full
 * SHA-512-sized internal state (SHA-384 shares the SHA-512 engine). */
1174 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1178 if (!SHA384_Init(&ctx))
1180 SHA512_Transform(&ctx, data_in);
1181 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
/* One SHA-512 compression round over a single block; copies the raw
 * internal state for precompute use. */
1185 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1189 if (!SHA512_Init(&ctx))
1191 SHA512_Transform(&ctx, data_in);
1192 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
/* One MD5 compression round over a single block; copies the raw
 * internal state (MD5 state is little-endian, so no swap is needed
 * later — see partial_hash_compute()). */
1196 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1200 if (!MD5_Init(&ctx))
1202 MD5_Transform(&ctx, data_in);
1203 rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
/* Compute a one-block partial hash for `hash_alg` over `data_in` and
 * write the state to `data_out` byte-swapped into big-endian words —
 * 32-bit words for SHA-1/224/256, 64-bit for SHA-384/512; MD5 is
 * written directly (its state is little-endian already).
 * NOTE(review): braces, `break`s and error returns are elided here. */
1208 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1213 uint8_t digest[qat_hash_get_digest_size(
1214 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1215 uint32_t *hash_state_out_be32;
1216 uint64_t *hash_state_out_be64;
1219 /* Initialize to avoid gcc warning */
1220 memset(digest, 0, sizeof(digest));
1222 digest_size = qat_hash_get_digest_size(hash_alg);
1223 if (digest_size <= 0)
1226 hash_state_out_be32 = (uint32_t *)data_out;
1227 hash_state_out_be64 = (uint64_t *)data_out;
1230 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1231 if (partial_hash_sha1(data_in, digest))
1233 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1234 *hash_state_out_be32 =
1235 rte_bswap32(*(((uint32_t *)digest)+i));
1237 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1238 if (partial_hash_sha224(data_in, digest))
1240 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1241 *hash_state_out_be32 =
1242 rte_bswap32(*(((uint32_t *)digest)+i));
1244 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1245 if (partial_hash_sha256(data_in, digest))
1247 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1248 *hash_state_out_be32 =
1249 rte_bswap32(*(((uint32_t *)digest)+i));
/* 64-bit-state algorithms swap 8-byte words instead of 4-byte. */
1251 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1252 if (partial_hash_sha384(data_in, digest))
1254 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1255 *hash_state_out_be64 =
1256 rte_bswap64(*(((uint64_t *)digest)+i));
1258 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1259 if (partial_hash_sha512(data_in, digest))
1261 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1262 *hash_state_out_be64 =
1263 rte_bswap64(*(((uint64_t *)digest)+i));
1265 case ICP_QAT_HW_AUTH_ALGO_MD5:
1266 if (partial_hash_md5(data_in, data_out))
1270 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
/* HMAC inner/outer pad bytes XOR-ed over the zero-padded key (RFC 2104) */
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
/* Number of keys (K1/K2/K3) precomputed for AES-XCBC-MAC */
#define HASH_XCBC_PRECOMP_KEY_NUM 3

/* All-zero block; AES-encrypting it under the auth key produces the CMAC
 * subkey-derivation value (NIST SP 800-38B "L"), see qat_sym_do_precomputes.
 */
static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1282 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1286 derived[0] = base[0] << 1;
1287 for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1288 derived[i] = base[i] << 1;
1289 derived[i - 1] |= base[i] >> 7;
1293 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
/*
 * Fill p_state_buf with the QAT precompute ("state2") for the given auth
 * algorithm: derived subkeys for AES-CMAC, three encrypted seed keys for
 * AES-XCBC-MAC, the GHASH key H for GCM/GMAC, or HMAC ipad/opad partial
 * hashes for the SHA/MD5 algorithms. *p_state_len is set to the number
 * of state bytes produced.
 */
static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
			const uint8_t *auth_key,
			uint16_t auth_keylen,
			uint8_t *p_state_buf,
			uint16_t *p_state_len,
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];

	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		/* CMAC sub-branch: derive K1/K2 from K0 = AES-K(0^128) */
		uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];

		auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;

		in = rte_zmalloc("AES CMAC K1",
				ICP_QAT_HW_AES_128_KEY_SZ, 16);
			QAT_LOG(ERR, "Failed to alloc memory");

		rte_memcpy(in, AES_CMAC_SEED,
				ICP_QAT_HW_AES_128_KEY_SZ);
		/* state1 holds the cipher key itself */
		rte_memcpy(p_state_buf, auth_key, auth_keylen);

		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
		/* k0 = AES-K(0^128), the SP 800-38B 'L' value */
		AES_encrypt(in, k0, &enc_key);

		k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

		/* K1 = derive(K0), K2 = derive(K1) */
		aes_cmac_key_derive(k0, k1);
		aes_cmac_key_derive(k1, k2);

		/* don't leave key material on the stack */
		memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;

		/* XCBC-MAC seed constants (RFC 3566): 0x01.., 0x02.., 0x03..
		 * blocks, each AES-encrypted under the auth key to give
		 * K1, K2, K3.
		 */
		static uint8_t qat_aes_xcbc_key_seed[
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,

		uint8_t *out = p_state_buf;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
			QAT_LOG(ERR, "Failed to alloc memory");

		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key,
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
			/* Kx = AES-K(seed block x) */
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		/* rewind 'in' to the start of the allocation before freeing */
		rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);

	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		/* GCM/GMAC: precompute GHASH key H = AES-K(0^128) */
		uint8_t *out = p_state_buf;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
			QAT_LOG(ERR, "Failed to alloc memory");

		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;

	/* HMAC path (SHA1/224/256/384/512, MD5) */
	block_size = qat_hash_get_block_size(hash_alg);

	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	/* XOR the padded key with the RFC 2104 ipad/opad bytes */
	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "ipad precompute failed");

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "opad precompute failed");

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
/*
 * Build the common LA bulk-request header template for the session from
 * its protocol flag, slice-type flags, auth direction and IV length.
 */
qat_sym_session_init_common_hdr(struct qat_sym_session *session)
	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
	uint32_t slice_flags = session->slice_types;

	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->service_cmd_id = session->qat_cmd;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

	/* Map the session protocol flag onto the firmware protocol bits */
	switch (proto_flags) {
	case QAT_CRYPTO_PROTO_FLAG_NONE:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_CCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CCM_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_GCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_GCM_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_SNOW_3G_PROTO);
	case QAT_CRYPTO_PROTO_FLAG_ZUC:
		/* ZUC uses a dedicated flag setter, not the PROTO field */
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
			ICP_QAT_FW_LA_ZUC_3G_PROTO);

	/* More than one of the following flags can be set at once */
	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

	if (session->is_auth) {
		if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
			/* verify: firmware compares digest, doesn't return it */
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
		} else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
			/* generate: firmware returns digest, no compare */
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
						ICP_QAT_FW_LA_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);

		/* no auth: neither return nor compare the digest */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);

	if (session->is_iv12B) {
		/* flag the firmware that the GCM IV is 12 octets */
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);

	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
/*
 * Populate the cipher part of the session's content descriptor: slice
 * chain ids, cipher config word, (possibly converted/derived) key,
 * per-algorithm state sizes and key padding, and the single-pass /
 * UCS request parameters.
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cipher and hash cd_ctrl views alias the same cd_ctrl storage */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	/* Gen4/UCS and legacy request-parameter layouts of the same bytes */
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* cipher-only: cipher slice -> DRAM write, no auth result */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* cipher-then-hash slice chain */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set.
		 * Chacha20-Poly1305 is special case, CTR but single-pass
		 * so both direction need to be used.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
			cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		/* stream ciphers with firmware-side key schedule */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizes and protocol flags */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;

	/* offsets in the CD are expressed in 8-byte (quad-word) units */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI stores the key twice: plain, then XOR-ed with the
		 * F8 key modifier.
		 */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		const uint8_t *final_key = cipherkey;

		/* UCS slice: key padded up to the next 16-byte multiple */
		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +

	if (cdesc->is_single_pass) {
		/* encode the digest length into the cipher config word */
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
			/* default: zero-pad the key up to total_key_size */
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	if (cdesc->is_ucs) {
		/*
		 * These values match in terms of position auth
		 * slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;

	/* record the final CD size (in quad-words) and key size */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
/*
 * Populate the auth part of the session's content descriptor: slice
 * chain ids, inner hash config, per-algorithm state1/state2 contents
 * (HMAC precomputes, GHASH key, raw keys, derived keys), and the
 * auth request parameters.
 */
int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation)
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	/* cipher and hash cd_ctrl views alias the same cd_ctrl storage */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex = 0;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		/* auth-only: auth slice -> DRAM write */
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* hash-then-cipher slice chain */
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		QAT_LOG(ERR, "Invalid param, must be a hash command.");

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
				cdesc->qat_hash_alg, digestsize);

	/* Algorithms that don't use the outer-hash block counter */
	if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
		|| cdesc->is_cnt_zero
		hash->auth_counter.counter = 0;

		/* HMAC: counter holds the hash block size, big-endian */
		int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);

		hash->auth_counter.counter = rte_bswap32(block_size);

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* plain hash: state1 is the SHA-1 IV, no precompute */
			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
					sizeof(sha1InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			QAT_LOG(ERR, "(SHA)precompute failed");
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
					sizeof(sha224InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			QAT_LOG(ERR, "(SHA)precompute failed");
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
					sizeof(sha256InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
			authkeylen, cdesc->cd_cur_ptr,	&state1_size,
			QAT_LOG(ERR, "(SHA)precompute failed");
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
					sizeof(sha384InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			QAT_LOG(ERR, "(SHA)precompute failed");
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
					sizeof(sha512InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
			authkeylen, cdesc->cd_cur_ptr,	&state1_size,
			QAT_LOG(ERR, "(SHA)precompute failed");
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

		if (cdesc->aes_cmac)
			memset(cdesc->cd_cur_ptr, 0, state1_size);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size, cdesc->aes_cmac)) {
			cdesc->aes_cmac ? QAT_LOG(ERR,
					"(CMAC)precompute failed")
					"(XCBC)precompute failed");
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		/* state2 = GHASH key H (computed by the precompute helper) */
		if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
			authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size, cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(GCM)precompute failed");
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(aad_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(aad_length);
		cdesc->aad_len = aad_length;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		/* an extra cipher config + key block follows the hash state */
		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		/* ZUC always uses MODE0 for the inner hash config */
		hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
				cdesc->qat_hash_alg, digestsize);
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->min_qat_dev_gen = QAT_GEN2;

	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			QAT_LOG(ERR, "(MD5)precompute failed");
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_NULL);
		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		/* CCM: state2 holds the raw key + E(CTR0) area */
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;

		if (aad_length > 0) {
			/* account for the CCM B0 block and AAD length field */
			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
			ICP_QAT_HW_CCM_AAD_LEN_INFO;
			auth_param->u2.aad_sz =
			RTE_ALIGN_CEIL(aad_length,
			ICP_QAT_HW_CCM_AAD_ALIGNMENT);
			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
		cdesc->aad_len = aad_length;
		hash->auth_counter.counter = 0;

		hash_cd_ctrl->outer_prefix_sz = digestsize;
		auth_param->hash_state_sz = digestsize;

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
		/*
		 * The Inner Hash Initial State2 block must contain IK
		 * (Initialisation Key), followed by IK XOR-ed with KM
		 * (Key Modifier): IK||(IK^KM).
		 */
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;

		QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz	= state2_size;
	/* state2 follows the auth setup and the 8-byte-aligned state1 */
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))

	cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2094 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2097 case ICP_QAT_HW_AES_128_KEY_SZ:
2098 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2100 case ICP_QAT_HW_AES_192_KEY_SZ:
2101 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2103 case ICP_QAT_HW_AES_256_KEY_SZ:
2104 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2112 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2113 enum icp_qat_hw_cipher_algo *alg)
2116 case ICP_QAT_HW_AES_128_KEY_SZ:
2117 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2119 case ICP_QAT_HW_AES_256_KEY_SZ:
2120 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2128 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2131 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2132 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2140 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2143 case ICP_QAT_HW_KASUMI_KEY_SZ:
2144 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2152 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2155 case ICP_QAT_HW_DES_KEY_SZ:
2156 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2164 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2167 case QAT_3DES_KEY_SZ_OPT1:
2168 case QAT_3DES_KEY_SZ_OPT2:
2169 case QAT_3DES_KEY_SZ_OPT3:
2170 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2178 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2181 case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2182 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2190 #ifdef RTE_LIB_SECURITY
/*
 * Validate a DOCSIS security session configuration: exactly one
 * AES-DOCSISBPI cipher transform (no chained xform) with a 128- or
 * 256-bit key and a 16-byte IV; encrypt for downlink, decrypt for
 * uplink.
 */
qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
	struct rte_security_docsis_xform *docsis = &conf->docsis;

	/* CRC generate -> Cipher encrypt */
	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {

		if (crypto_sym != NULL &&
		    crypto_sym->type ==	RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op ==	RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length ==
					ICP_QAT_HW_AES_128_KEY_SZ ||
		     crypto_sym->cipher.key.length ==
					ICP_QAT_HW_AES_256_KEY_SZ) &&
		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
		    crypto_sym->next == NULL) {

	/* Cipher decrypt -> CRC verify */
	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {

		if (crypto_sym != NULL &&
		    crypto_sym->type ==	RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op ==	RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length ==
					ICP_QAT_HW_AES_128_KEY_SZ ||
		     crypto_sym->cipher.key.length ==
					ICP_QAT_HW_AES_256_KEY_SZ) &&
		    crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
		    crypto_sym->next == NULL) {
/*
 * Initialise a QAT session from a DOCSIS security configuration:
 * validate the config, record the CD physical address, and configure
 * the (cipher-only) session from the crypto transform.
 */
qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf, void *session_private)
	struct rte_crypto_sym_xform *xform = NULL;
	struct qat_sym_session *session = session_private;

	/* Clear the session */
	memset(session, 0, qat_sym_session_get_private_size(dev));

	ret = qat_sec_session_check_docsis(conf);
		QAT_LOG(ERR, "Unsupported DOCSIS security configuration");

	xform = conf->crypto_xform;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
			"Session physical address unknown. Bad memory pool.");

	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	session->min_qat_dev_gen = QAT_GEN1;

	/* Get requested QAT command id - should be cipher */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;

	ret = qat_sym_session_configure_cipher(dev, xform, session);

	qat_sym_session_finalize(session);
/*
 * rte_security session-create op: accepts only lookaside-protocol
 * DOCSIS configs, takes a private-data object from the mempool,
 * configures it, and attaches it to the security session.
 */
qat_security_session_create(void *dev,
				struct rte_security_session_conf *conf,
				struct rte_security_session *sess,
				struct rte_mempool *mempool)
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
		QAT_LOG(ERR, "Invalid security protocol");

	if (rte_mempool_get(mempool, &sess_private_data)) {
		QAT_LOG(ERR, "Couldn't get object from session mempool");

	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
		QAT_LOG(ERR, "Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);

	set_sec_session_private_data(sess, sess_private_data);
2320 qat_security_session_destroy(void *dev __rte_unused,
2321 struct rte_security_session *sess)
2323 void *sess_priv = get_sec_session_private_data(sess);
2324 struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2328 bpi_cipher_ctx_free(s->bpi_ctx);
2329 memset(s, 0, qat_sym_session_get_private_size(dev));
2330 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2332 set_sec_session_private_data(sess, NULL);
2333 rte_mempool_put(sess_mp, sess_priv);