/*
 *   This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   Contact Information:
 *   qat-linux@intel.com
 *
 *   BSD LICENSE
 *   Copyright(c) 2015-2017 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */

#include "qat_algs.h"
/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
{
	switch (qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_DES:
		return ICP_QAT_HW_DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_3DES:
		return ICP_QAT_HW_3DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_AES128:
	case ICP_QAT_HW_CIPHER_ALGO_AES192:
	case ICP_QAT_HW_CIPHER_ALGO_AES256:
		return ICP_QAT_HW_AES_BLK_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
		return -EFAULT;
	}
}
/*
 * Returns size in bytes per hash algo for the state1 size field in cd_ctrl.
 * This is the digest size rounded up to the nearest quadword.
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
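
/*
 * Worked example of the rounding above: with QAT_HW_DEFAULT_ALIGNMENT of
 * 8 bytes (one quadword), the 20-byte SHA-1 state1 rounds up to 24 bytes,
 * while the 32-byte SHA-256 state1 is already quadword-aligned and is
 * returned unchanged. The pad bytes matter because state2 is laid out
 * directly after the rounded state1 region in the content descriptor.
 */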
/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
	return 0;
}
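
/*
 * A note on the partial_hash_* helpers above: each runs exactly one
 * compression round (*_Transform) over a single input block and then
 * copies the raw OpenSSL context out. This relies on the OpenSSL context
 * layouts (SHA_CTX, SHA256_CTX, SHA512_CTX, MD5_CTX) placing the chaining
 * words at the start of the struct, so the first *_DIGEST_LENGTH bytes of
 * the context are the midstate after that block. The QAT hardware later
 * resumes hashing from this midstate instead of the algorithm's IV.
 */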
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		/* SHA-1/224/256 use 32-bit state words; hw wants big-endian */
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		/* SHA-384/512 state words are 64 bits wide */
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 state is little-endian per spec, so no byte swap */
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
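
/*
 * HMAC background for qat_alg_do_precomputes() below: HMAC(K, m) is
 * defined as H((K ^ opad) || H((K ^ ipad) || m)). The driver hashes the
 * single ipad and opad blocks here in software and stores the two
 * midstates in the content descriptor (state1 and state2); the hardware
 * then only has to continue the inner and outer hashes per request.
 * Roughly what the hardware finishes, expressed in software (illustrative
 * sketch only, not driver code; "restoring" a midstate into a SHA_CTX is
 * hypothetical here):
 *
 *	SHA_CTX ctx = restored_ipad_midstate;
 *	SHA1_Update(&ctx, msg, msg_len);	// inner hash continues
 *	SHA1_Final(inner, &ctx);
 *	ctx = restored_opad_midstate;
 *	SHA1_Update(&ctx, inner, SHA_DIGEST_LENGTH);
 *	SHA1_Final(mac, &ctx);			// final HMAC-SHA1 tag
 */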
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	PMD_INIT_FUNC_TRACE();
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		static uint8_t qat_aes_xcbc_key_seed[
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		int x;
		AES_KEY enc_key;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
		if (in == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory");
			return -ENOMEM;
		}

		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
					&enc_key) != 0) {
				rte_free(in -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
				memset(out -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				return -EFAULT;
			}
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
		return 0;
	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory");
			return -ENOMEM;
		}

		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
			rte_free(in);
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size <= 0)
		return -EFAULT;
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1.
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}
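
/*
 * Two non-HMAC precomputes are handled above, for reference:
 * - AES-XCBC-MAC (RFC 3566): K1/K2/K3 are derived by encrypting constant
 *   blocks of 0x01.., 0x02.. and 0x03.. under the user key, which is what
 *   the seed-buffer loop computes.
 * - GCM/GMAC: the GHASH key H is the encryption of the all-zero block,
 *   H = AES_K(0^128). An equivalent standalone computation (illustrative
 *   sketch, not driver code):
 *
 *	AES_KEY k;
 *	uint8_t h[16] = { 0 };		// zero block in, H out (in place)
 *	AES_set_encrypt_key(key, 128, &k);
 *	AES_encrypt(h, h, &k);		// h now holds the GHASH key H
 */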
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
				enum qat_crypto_proto_flag proto_flags)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

	switch (proto_flags) {
	case QAT_CRYPTO_PROTO_FLAG_NONE:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_CCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_GCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_GCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_SNOW_3G_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_ZUC:
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	}

	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}
/*
 * Snow3G and ZUC should never use this function; they set their protocol
 * flag in both the cipher and auth parts of the content descriptor
 * building functions instead.
 */
static enum qat_crypto_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
	int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
	enum qat_crypto_proto_flag qat_proto_flag =
			QAT_CRYPTO_PROTO_FLAG_NONE;

	switch (proto) {
	case ICP_QAT_FW_LA_GCM_PROTO:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		break;
	case ICP_QAT_FW_LA_CCM_PROTO:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		break;
	}

	return qat_proto_flag;
}
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	enum qat_crypto_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR streaming ciphers are a special case: decrypt = encrypt.
		 * Override the default values previously set.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
			(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, qat_proto_flag);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size - cipherkeylen;

		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
			/* K3 not provided so use K1 = K3 */
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		else
			memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
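
/*
 * Resulting cipher content descriptor layout built above (illustrative;
 * KASUMI shown as the special case with a duplicated, modified key):
 *
 *	+--------------------------+  <- cd_paddr / &cdesc->cd
 *	| icp_qat_hw_cipher_config |
 *	+--------------------------+
 *	| cipher key               |
 *	+--------------------------+
 *	| key ^ F8 key modifier    |  KASUMI only
 *	| or zero/K1 padding       |  other algos, when total_key_size
 *	+--------------------------+  exceeds the provided key length
 */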
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	uint16_t state1_size = 0, state2_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex = 0;
	uint32_t *pTempKey;
	enum qat_crypto_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
	}

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
		hash->auth_counter.counter = 0;
	else
		hash->auth_counter.counter = rte_bswap32(
				qat_hash_get_block_size(cdesc->qat_hash_alg));

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes.
		 */
		auth_param->u2.aad_sz =
			RTE_ALIGN_CEIL(aad_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(aad_length);
		cdesc->aad_len = aad_length;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
				cdesc->qat_hash_alg, digestsize);
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		cdesc->cd_cur_ptr += state1_size + state2_size
			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->min_qat_dev_gen = QAT_GEN2;

		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_NULL);
		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;

		if (aad_length > 0) {
			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
			auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(aad_length,
					ICP_QAT_HW_CCM_AAD_ALIGNMENT);
		} else {
			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
		}

		cdesc->aad_len = aad_length;
		hash->auth_counter.counter = 0;

		hash_cd_ctrl->outer_prefix_sz = digestsize;
		auth_param->hash_state_sz = digestsize;

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
							+ authkeylen);
		/*
		 * The Inner Hash Initial State2 block must contain IK
		 * (Initialisation Key), followed by IK XOR-ed with KM
		 * (Key Modifier): IK||(IK^KM).
		 */
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Request template setup */
	qat_alg_init_common_hdr(header, qat_proto_flag);
	header->service_cmd_id = cdesc->qat_cmd;

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size;
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
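
/*
 * Sketch of the auth content descriptor region built above for the HMAC
 * cases (illustrative layout; the cd_ctrl offset fields are expressed in
 * quadwords, hence the >> 3 shifts):
 *
 *	+---------------------------+  <- hash_cfg_offset (qw)
 *	| icp_qat_hw_auth_setup     |
 *	+---------------------------+
 *	| state1: H(key ^ ipad)     |  inner_state1_sz, quadword aligned
 *	+---------------------------+  <- inner_state2_offset (qw)
 *	| state2: H(key ^ opad)     |  inner_state2_sz
 *	+---------------------------+
 */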
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_aes_docsisbpi_key(int key_len,
		enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_KASUMI_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_DES_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_DES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case QAT_3DES_KEY_SZ_OPT1:
	case QAT_3DES_KEY_SZ_OPT2:
		*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
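
/*
 * Typical use of the validate helpers (illustrative sketch only;
 * cipher_xform and session are hypothetical names here, the real callers
 * live in the session-configure path of the QAT PMD):
 *
 *	enum icp_qat_hw_cipher_algo alg;
 *
 *	if (qat_alg_validate_aes_key(cipher_xform->key.length, &alg) != 0)
 *		return -EINVAL;	// unsupported key size
 *	session->qat_cipher_alg = alg;
 */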