2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
6 * Copyright(c) 2015-2016 Intel Corporation.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * Contact Information:
20 * Copyright(c) 2015-2017 Intel Corporation.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
56 #include "../qat_logs.h"
58 #include <openssl/sha.h> /* Needed to calculate pre-compute values */
59 #include <openssl/aes.h> /* Needed to calculate pre-compute values */
60 #include <openssl/md5.h> /* Needed to calculate pre-compute values */
64 /* returns block size in bytes per cipher algo */
65 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
67 switch (qat_cipher_alg) {
68 case ICP_QAT_HW_CIPHER_ALGO_DES:
69 return ICP_QAT_HW_DES_BLK_SZ;
70 case ICP_QAT_HW_CIPHER_ALGO_3DES:
71 return ICP_QAT_HW_3DES_BLK_SZ;
72 case ICP_QAT_HW_CIPHER_ALGO_AES128:
73 case ICP_QAT_HW_CIPHER_ALGO_AES192:
74 case ICP_QAT_HW_CIPHER_ALGO_AES256:
75 return ICP_QAT_HW_AES_BLK_SZ;
77 PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
84 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
85 * This is digest size rounded up to nearest quadword
87 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
89 switch (qat_hash_alg) {
90 case ICP_QAT_HW_AUTH_ALGO_SHA1:
91 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
92 QAT_HW_DEFAULT_ALIGNMENT);
93 case ICP_QAT_HW_AUTH_ALGO_SHA224:
94 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
95 QAT_HW_DEFAULT_ALIGNMENT);
96 case ICP_QAT_HW_AUTH_ALGO_SHA256:
97 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
98 QAT_HW_DEFAULT_ALIGNMENT);
99 case ICP_QAT_HW_AUTH_ALGO_SHA384:
100 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
101 QAT_HW_DEFAULT_ALIGNMENT);
102 case ICP_QAT_HW_AUTH_ALGO_SHA512:
103 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
104 QAT_HW_DEFAULT_ALIGNMENT);
105 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
106 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
107 QAT_HW_DEFAULT_ALIGNMENT);
108 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
109 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
110 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
111 QAT_HW_DEFAULT_ALIGNMENT);
112 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
113 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
114 QAT_HW_DEFAULT_ALIGNMENT);
115 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
116 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
117 QAT_HW_DEFAULT_ALIGNMENT);
118 case ICP_QAT_HW_AUTH_ALGO_MD5:
119 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
120 QAT_HW_DEFAULT_ALIGNMENT);
121 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
122 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
123 QAT_HW_DEFAULT_ALIGNMENT);
124 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
125 /* return maximum state1 size in this case */
126 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
127 QAT_HW_DEFAULT_ALIGNMENT);
129 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
135 /* returns digest size in bytes per hash algo */
136 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
138 switch (qat_hash_alg) {
139 case ICP_QAT_HW_AUTH_ALGO_SHA1:
140 return ICP_QAT_HW_SHA1_STATE1_SZ;
141 case ICP_QAT_HW_AUTH_ALGO_SHA224:
142 return ICP_QAT_HW_SHA224_STATE1_SZ;
143 case ICP_QAT_HW_AUTH_ALGO_SHA256:
144 return ICP_QAT_HW_SHA256_STATE1_SZ;
145 case ICP_QAT_HW_AUTH_ALGO_SHA384:
146 return ICP_QAT_HW_SHA384_STATE1_SZ;
147 case ICP_QAT_HW_AUTH_ALGO_SHA512:
148 return ICP_QAT_HW_SHA512_STATE1_SZ;
149 case ICP_QAT_HW_AUTH_ALGO_MD5:
150 return ICP_QAT_HW_MD5_STATE1_SZ;
151 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
152 /* return maximum digest size in this case */
153 return ICP_QAT_HW_SHA512_STATE1_SZ;
155 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
161 /* returns block size in byes per hash algo */
162 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
164 switch (qat_hash_alg) {
165 case ICP_QAT_HW_AUTH_ALGO_SHA1:
167 case ICP_QAT_HW_AUTH_ALGO_SHA224:
168 return SHA256_CBLOCK;
169 case ICP_QAT_HW_AUTH_ALGO_SHA256:
170 return SHA256_CBLOCK;
171 case ICP_QAT_HW_AUTH_ALGO_SHA384:
172 return SHA512_CBLOCK;
173 case ICP_QAT_HW_AUTH_ALGO_SHA512:
174 return SHA512_CBLOCK;
175 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
177 case ICP_QAT_HW_AUTH_ALGO_MD5:
179 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
180 /* return maximum block size in this case */
181 return SHA512_CBLOCK;
183 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
189 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
193 if (!SHA1_Init(&ctx))
195 SHA1_Transform(&ctx, data_in);
196 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
200 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
204 if (!SHA224_Init(&ctx))
206 SHA256_Transform(&ctx, data_in);
207 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
211 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
215 if (!SHA256_Init(&ctx))
217 SHA256_Transform(&ctx, data_in);
218 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
222 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
226 if (!SHA384_Init(&ctx))
228 SHA512_Transform(&ctx, data_in);
229 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
233 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
237 if (!SHA512_Init(&ctx))
239 SHA512_Transform(&ctx, data_in);
240 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
244 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
250 MD5_Transform(&ctx, data_in);
251 rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
256 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
261 uint8_t digest[qat_hash_get_digest_size(
262 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
263 uint32_t *hash_state_out_be32;
264 uint64_t *hash_state_out_be64;
267 PMD_INIT_FUNC_TRACE();
268 digest_size = qat_hash_get_digest_size(hash_alg);
269 if (digest_size <= 0)
272 hash_state_out_be32 = (uint32_t *)data_out;
273 hash_state_out_be64 = (uint64_t *)data_out;
276 case ICP_QAT_HW_AUTH_ALGO_SHA1:
277 if (partial_hash_sha1(data_in, digest))
279 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
280 *hash_state_out_be32 =
281 rte_bswap32(*(((uint32_t *)digest)+i));
283 case ICP_QAT_HW_AUTH_ALGO_SHA224:
284 if (partial_hash_sha224(data_in, digest))
286 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
287 *hash_state_out_be32 =
288 rte_bswap32(*(((uint32_t *)digest)+i));
290 case ICP_QAT_HW_AUTH_ALGO_SHA256:
291 if (partial_hash_sha256(data_in, digest))
293 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
294 *hash_state_out_be32 =
295 rte_bswap32(*(((uint32_t *)digest)+i));
297 case ICP_QAT_HW_AUTH_ALGO_SHA384:
298 if (partial_hash_sha384(data_in, digest))
300 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
301 *hash_state_out_be64 =
302 rte_bswap64(*(((uint64_t *)digest)+i));
304 case ICP_QAT_HW_AUTH_ALGO_SHA512:
305 if (partial_hash_sha512(data_in, digest))
307 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
308 *hash_state_out_be64 =
309 rte_bswap64(*(((uint64_t *)digest)+i));
311 case ICP_QAT_HW_AUTH_ALGO_MD5:
312 if (partial_hash_md5(data_in, data_out))
316 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
/* HMAC inner/outer pad XOR constants (RFC 2104) and the number of
 * AES-XCBC-MAC precomputed keys (K1, K2, K3).
 */
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
326 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
327 const uint8_t *auth_key,
328 uint16_t auth_keylen,
329 uint8_t *p_state_buf,
330 uint16_t *p_state_len)
333 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
334 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
337 PMD_INIT_FUNC_TRACE();
338 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
339 static uint8_t qat_aes_xcbc_key_seed[
340 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
341 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
342 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
343 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
344 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
345 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
346 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
350 uint8_t *out = p_state_buf;
354 in = rte_zmalloc("working mem for key",
355 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
356 rte_memcpy(in, qat_aes_xcbc_key_seed,
357 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
358 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
359 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
362 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
364 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
365 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
368 AES_encrypt(in, out, &enc_key);
369 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
370 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
372 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
373 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
375 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
376 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
378 uint8_t *out = p_state_buf;
381 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
382 ICP_QAT_HW_GALOIS_LEN_A_SZ +
383 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
384 in = rte_zmalloc("working mem for key",
385 ICP_QAT_HW_GALOIS_H_SZ, 16);
386 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
387 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
391 AES_encrypt(in, out, &enc_key);
392 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
393 ICP_QAT_HW_GALOIS_LEN_A_SZ +
394 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
399 block_size = qat_hash_get_block_size(hash_alg);
402 /* init ipad and opad from key and xor with fixed values */
403 memset(ipad, 0, block_size);
404 memset(opad, 0, block_size);
406 if (auth_keylen > (unsigned int)block_size) {
407 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
410 rte_memcpy(ipad, auth_key, auth_keylen);
411 rte_memcpy(opad, auth_key, auth_keylen);
413 for (i = 0; i < block_size; i++) {
414 uint8_t *ipad_ptr = ipad + i;
415 uint8_t *opad_ptr = opad + i;
416 *ipad_ptr ^= HMAC_IPAD_VALUE;
417 *opad_ptr ^= HMAC_OPAD_VALUE;
420 /* do partial hash of ipad and copy to state1 */
421 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
422 memset(ipad, 0, block_size);
423 memset(opad, 0, block_size);
424 PMD_DRV_LOG(ERR, "ipad precompute failed");
429 * State len is a multiple of 8, so may be larger than the digest.
430 * Put the partial hash of opad state_len bytes after state1
432 *p_state_len = qat_hash_get_state1_size(hash_alg);
433 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
434 memset(ipad, 0, block_size);
435 memset(opad, 0, block_size);
436 PMD_DRV_LOG(ERR, "opad precompute failed");
440 /* don't leave data lying around */
441 memset(ipad, 0, block_size);
442 memset(opad, 0, block_size);
446 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
447 enum qat_crypto_proto_flag proto_flags)
449 PMD_INIT_FUNC_TRACE();
451 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
452 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
453 header->comn_req_flags =
454 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
455 QAT_COMN_PTR_TYPE_FLAT);
456 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
457 ICP_QAT_FW_LA_PARTIAL_NONE);
458 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
459 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
461 switch (proto_flags) {
462 case QAT_CRYPTO_PROTO_FLAG_NONE:
463 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
464 ICP_QAT_FW_LA_NO_PROTO);
466 case QAT_CRYPTO_PROTO_FLAG_CCM:
467 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
468 ICP_QAT_FW_LA_CCM_PROTO);
470 case QAT_CRYPTO_PROTO_FLAG_GCM:
471 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
472 ICP_QAT_FW_LA_GCM_PROTO);
474 case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
475 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
476 ICP_QAT_FW_LA_SNOW_3G_PROTO);
478 case QAT_CRYPTO_PROTO_FLAG_ZUC:
479 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
480 ICP_QAT_FW_LA_ZUC_3G_PROTO);
484 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
485 ICP_QAT_FW_LA_NO_UPDATE_STATE);
486 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
487 ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
491 * Snow3G and ZUC should never use this function
492 * and set its protocol flag in both cipher and auth part of content
493 * descriptor building function
495 static enum qat_crypto_proto_flag
496 qat_get_crypto_proto_flag(uint16_t flags)
498 int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
499 enum qat_crypto_proto_flag qat_proto_flag =
500 QAT_CRYPTO_PROTO_FLAG_NONE;
503 case ICP_QAT_FW_LA_GCM_PROTO:
504 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
506 case ICP_QAT_FW_LA_CCM_PROTO:
507 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
511 return qat_proto_flag;
514 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
516 uint32_t cipherkeylen)
518 struct icp_qat_hw_cipher_algo_blk *cipher;
519 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
520 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
521 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
522 void *ptr = &req_tmpl->cd_ctrl;
523 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
524 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
525 enum icp_qat_hw_cipher_convert key_convert;
526 enum qat_crypto_proto_flag qat_proto_flag =
527 QAT_CRYPTO_PROTO_FLAG_NONE;
528 uint32_t total_key_size;
529 uint16_t cipher_offset, cd_size;
530 uint32_t wordIndex = 0;
531 uint32_t *temp_key = NULL;
532 PMD_INIT_FUNC_TRACE();
534 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
535 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
536 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
537 ICP_QAT_FW_SLICE_CIPHER);
538 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
539 ICP_QAT_FW_SLICE_DRAM_WR);
540 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
541 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
542 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
543 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
544 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
545 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
546 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
547 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
548 ICP_QAT_FW_SLICE_CIPHER);
549 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
550 ICP_QAT_FW_SLICE_AUTH);
551 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
552 ICP_QAT_FW_SLICE_AUTH);
553 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
554 ICP_QAT_FW_SLICE_DRAM_WR);
555 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
556 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
557 PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
561 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
563 * CTR Streaming ciphers are a special case. Decrypt = encrypt
564 * Overriding default values previously set
566 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
567 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
568 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
569 || cdesc->qat_cipher_alg ==
570 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
571 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
572 else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
573 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
575 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
577 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
578 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
579 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
580 cipher_cd_ctrl->cipher_state_sz =
581 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
582 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
584 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
585 total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
586 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
587 cipher_cd_ctrl->cipher_padding_sz =
588 (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
589 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
590 total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
591 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
593 qat_get_crypto_proto_flag(header->serv_specif_flags);
594 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
595 total_key_size = ICP_QAT_HW_DES_KEY_SZ;
596 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
598 qat_get_crypto_proto_flag(header->serv_specif_flags);
599 } else if (cdesc->qat_cipher_alg ==
600 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
601 total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
602 ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
603 cipher_cd_ctrl->cipher_state_sz =
604 ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
605 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
607 total_key_size = cipherkeylen;
608 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
610 qat_get_crypto_proto_flag(header->serv_specif_flags);
612 cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
613 cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
614 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
616 header->service_cmd_id = cdesc->qat_cmd;
617 qat_alg_init_common_hdr(header, qat_proto_flag);
619 cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
620 cipher->cipher_config.val =
621 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
622 cdesc->qat_cipher_alg, key_convert,
625 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
626 temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
627 sizeof(struct icp_qat_hw_cipher_config)
629 memcpy(cipher->key, cipherkey, cipherkeylen);
630 memcpy(temp_key, cipherkey, cipherkeylen);
632 /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
633 for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
635 temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
637 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
638 cipherkeylen + cipherkeylen;
640 memcpy(cipher->key, cipherkey, cipherkeylen);
641 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
645 if (total_key_size > cipherkeylen) {
646 uint32_t padding_size = total_key_size-cipherkeylen;
647 if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
648 && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
649 /* K3 not provided so use K1 = K3*/
650 memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
652 memset(cdesc->cd_cur_ptr, 0, padding_size);
653 cdesc->cd_cur_ptr += padding_size;
655 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
656 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
661 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
664 uint32_t add_auth_data_length,
666 unsigned int operation)
668 struct icp_qat_hw_auth_setup *hash;
669 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
670 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
671 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
672 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
673 void *ptr = &req_tmpl->cd_ctrl;
674 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
675 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
676 struct icp_qat_fw_la_auth_req_params *auth_param =
677 (struct icp_qat_fw_la_auth_req_params *)
678 ((char *)&req_tmpl->serv_specif_rqpars +
679 sizeof(struct icp_qat_fw_la_cipher_req_params));
680 uint16_t state1_size = 0, state2_size = 0;
681 uint16_t hash_offset, cd_size;
682 uint32_t *aad_len = NULL;
683 uint32_t wordIndex = 0;
685 enum qat_crypto_proto_flag qat_proto_flag =
686 QAT_CRYPTO_PROTO_FLAG_NONE;
688 PMD_INIT_FUNC_TRACE();
690 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
691 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
692 ICP_QAT_FW_SLICE_AUTH);
693 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
694 ICP_QAT_FW_SLICE_DRAM_WR);
695 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
696 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
697 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
698 ICP_QAT_FW_SLICE_AUTH);
699 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
700 ICP_QAT_FW_SLICE_CIPHER);
701 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
702 ICP_QAT_FW_SLICE_CIPHER);
703 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
704 ICP_QAT_FW_SLICE_DRAM_WR);
705 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
706 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
707 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
711 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
712 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
713 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
714 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
715 ICP_QAT_FW_LA_CMP_AUTH_RES);
716 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
718 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
719 ICP_QAT_FW_LA_RET_AUTH_RES);
720 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
721 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
722 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
726 * Setup the inner hash config
728 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
729 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
730 hash->auth_config.reserved = 0;
731 hash->auth_config.config =
732 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
733 cdesc->qat_hash_alg, digestsize);
735 if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
736 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
737 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
738 hash->auth_counter.counter = 0;
740 hash->auth_counter.counter = rte_bswap32(
741 qat_hash_get_block_size(cdesc->qat_hash_alg));
743 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
746 * cd_cur_ptr now points at the state1 information.
748 switch (cdesc->qat_hash_alg) {
749 case ICP_QAT_HW_AUTH_ALGO_SHA1:
750 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
751 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
752 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
755 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
757 case ICP_QAT_HW_AUTH_ALGO_SHA224:
758 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
759 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
760 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
763 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
765 case ICP_QAT_HW_AUTH_ALGO_SHA256:
766 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
767 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
768 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
771 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
773 case ICP_QAT_HW_AUTH_ALGO_SHA384:
774 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
775 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
776 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
779 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
781 case ICP_QAT_HW_AUTH_ALGO_SHA512:
782 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
783 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
784 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
787 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
789 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
790 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
791 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
792 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
794 PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
798 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
799 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
800 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
801 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
802 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
803 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
805 PMD_DRV_LOG(ERR, "(GCM)precompute failed");
809 * Write (the length of AAD) into bytes 16-19 of state2
810 * in big-endian format. This field is 8 bytes
812 auth_param->u2.aad_sz =
813 RTE_ALIGN_CEIL(add_auth_data_length, 16);
814 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
816 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
817 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
818 ICP_QAT_HW_GALOIS_H_SZ);
819 *aad_len = rte_bswap32(add_auth_data_length);
820 cdesc->aad_len = add_auth_data_length;
822 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
823 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
824 state1_size = qat_hash_get_state1_size(
825 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
826 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
827 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
829 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
830 (cdesc->cd_cur_ptr + state1_size + state2_size);
831 cipherconfig->cipher_config.val =
832 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
833 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
834 ICP_QAT_HW_CIPHER_KEY_CONVERT,
835 ICP_QAT_HW_CIPHER_ENCRYPT);
836 memcpy(cipherconfig->key, authkey, authkeylen);
837 memset(cipherconfig->key + authkeylen,
838 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
839 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
840 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
841 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
843 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
844 hash->auth_config.config =
845 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
846 cdesc->qat_hash_alg, digestsize);
847 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
848 state1_size = qat_hash_get_state1_size(
849 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
850 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
851 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
852 + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
854 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
855 cdesc->cd_cur_ptr += state1_size + state2_size
856 + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
857 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
860 case ICP_QAT_HW_AUTH_ALGO_MD5:
861 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
862 authkey, authkeylen, cdesc->cd_cur_ptr,
864 PMD_DRV_LOG(ERR, "(MD5)precompute failed");
867 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
869 case ICP_QAT_HW_AUTH_ALGO_NULL:
871 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
872 state1_size = qat_hash_get_state1_size(
873 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
874 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
875 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
876 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
879 * The Inner Hash Initial State2 block must contain IK
880 * (Initialisation Key), followed by IK XOR-ed with KM
881 * (Key Modifier): IK||(IK^KM).
883 /* write the auth key */
884 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
885 /* initialise temp key with auth key */
886 memcpy(pTempKey, authkey, authkeylen);
887 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
888 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
889 pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
892 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
896 /* Request template setup */
897 qat_alg_init_common_hdr(header, qat_proto_flag);
898 header->service_cmd_id = cdesc->qat_cmd;
900 /* Auth CD config setup */
901 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
902 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
903 hash_cd_ctrl->inner_res_sz = digestsize;
904 hash_cd_ctrl->final_sz = digestsize;
905 hash_cd_ctrl->inner_state1_sz = state1_size;
906 auth_param->auth_res_sz = digestsize;
908 hash_cd_ctrl->inner_state2_sz = state2_size;
909 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
910 ((sizeof(struct icp_qat_hw_auth_setup) +
911 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
914 cdesc->cd_cur_ptr += state1_size + state2_size;
915 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
917 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
918 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
923 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
926 case ICP_QAT_HW_AES_128_KEY_SZ:
927 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
929 case ICP_QAT_HW_AES_192_KEY_SZ:
930 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
932 case ICP_QAT_HW_AES_256_KEY_SZ:
933 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
941 int qat_alg_validate_aes_docsisbpi_key(int key_len,
942 enum icp_qat_hw_cipher_algo *alg)
945 case ICP_QAT_HW_AES_128_KEY_SZ:
946 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
954 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
957 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
958 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
966 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
969 case ICP_QAT_HW_KASUMI_KEY_SZ:
970 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
978 int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
981 case ICP_QAT_HW_DES_KEY_SZ:
982 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
990 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
993 case QAT_3DES_KEY_SZ_OPT1:
994 case QAT_3DES_KEY_SZ_OPT2:
995 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
1003 int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
1006 case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
1007 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;