/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   Contact Information:
 *   qat-linux@intel.com
 *
 *   BSD LICENSE
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
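/*
 * Note: OpenSSL is used here only at session-setup time, to derive the
 * constants (HMAC ipad/opad partial hashes, AES-XCBC subkeys, the GCM
 * hash key H) that get written into the QAT content descriptor; no
 * OpenSSL call is made on the per-packet data path.
 */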
/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
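/*
 * Illustration (not part of the original code): state1 holds the inner
 * partial hash padded out to a quadword boundary, so with an 8-byte
 * alignment a 20-byte SHA-1 midstate is reported as 24 bytes, while the
 * 32-byte SHA-224/SHA-256 midstate is already aligned and stays at 32.
 */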
/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
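/*
 * Illustrative note: these are the compression-function block widths -
 * 64 bytes (MD5_CBLOCK/SHA_CBLOCK/SHA256_CBLOCK) for MD5, SHA-1 and the
 * SHA-224/256 pair, 128 bytes (SHA512_CBLOCK) for SHA-384/512, and the
 * 16-byte AES block for the Galois/GCM hash.
 */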
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
	return 0;
}
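/*
 * All of the partial_hash_*() helpers follow the same pattern: run the
 * algorithm's compression function over exactly one unpadded block and
 * copy the resulting chaining variables (the "midstate") from the start
 * of the OpenSSL context.  These midstates are what seeds the QAT auth
 * slice so it can resume the inner and outer HMAC hashes in hardware.
 */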
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
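/*
 * Byte-order note: OpenSSL keeps the chaining variables as host-endian
 * words while the QAT hardware expects the saved state big-endian, hence
 * the per-word rte_bswap32()/rte_bswap64() above (32-bit words for the
 * SHA-1/224/256 family, 64-bit for SHA-384/512).  MD5 state is defined
 * little-endian, so it is copied through unchanged.
 */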
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	PMD_INIT_FUNC_TRACE();
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		static uint8_t qat_aes_xcbc_key_seed[
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		int x;
		AES_KEY enc_key;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
					&enc_key) != 0) {
				rte_free(in -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
				memset(out -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				return -EFAULT;
			}
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
		return 0;
	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size <= 0)
		return -EFAULT;
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}
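/*
 * For reference, the HMAC branch above is the standard RFC 2104 key
 * preparation.  Roughly (illustrative sketch only, not driver code):
 *
 *	memset(ipad, 0, block_size); memset(opad, 0, block_size);
 *	memcpy(ipad, key, keylen);   memcpy(opad, key, keylen);
 *	for (i = 0; i < block_size; i++) {
 *		ipad[i] ^= 0x36;
 *		opad[i] ^= 0x5c;
 *	}
 *	state1 = compress(IV, ipad);	// one-block midstate
 *	state2 = compress(IV, opad);
 *
 * The firmware resumes from these two midstates to finish
 * H(K^ipad || msg) and H(K^opad || inner-hash) per request, so the raw
 * key never has to be stored in the content descriptor.
 */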
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
					uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					proto);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
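/*
 * Every content-descriptor builder below starts from this common header:
 * flat (non-SGL) source/destination pointers, a 64-bit content descriptor
 * address, no partial-packet processing and no state update; only the
 * protocol bits (none/GCM/Snow3G) differ per session.
 */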
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;
	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher->aes.cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->aes.key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->aes.key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size-cipherkeylen;

		memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
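/*
 * Resulting cipher section of the content descriptor (illustrative, for
 * e.g. AES-128-CBC):
 *
 *	cipher_cfg_offset (quadwords)  : struct icp_qat_hw_cipher_config
 *	+ sizeof(cipher_config)        : cipher key (16 bytes)
 *	...                            : zero padding up to total_key_size
 *
 * cipher_key_sz and cipher_cfg_offset are expressed in quadwords (>> 3),
 * which is also why cd_size is rounded up to 8 bytes before being
 * reported in content_desc_params_sz.
 */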
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t add_auth_data_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t state1_size = 0, state2_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex = 0;
	uint32_t *pTempKey;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	}

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
		hash->auth_counter.counter = 0;
	else
		hash->auth_counter.counter = rte_bswap32(
				qat_hash_get_block_size(cdesc->qat_hash_alg));

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		proto = ICP_QAT_FW_LA_GCM_PROTO;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(add_auth_data_length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->aes.cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->aes.key, authkey, authkeylen);
		memset(cipherconfig->aes.key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
							+ authkeylen);
		/*
		 * The Inner Hash Initial State2 block must contain IK
		 * (Initialisation Key), followed by IK XOR-ed with KM
		 * (Key Modifier): IK||(IK^KM).
		 */
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Request template setup */
	qat_alg_init_common_hdr(header, proto);
	header->service_cmd_id = cdesc->qat_cmd;

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size;
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
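/*
 * Resulting auth section of the content descriptor (illustrative, for
 * e.g. HMAC-SHA1):
 *
 *	hash_cfg_offset (quadwords)   : struct icp_qat_hw_auth_setup
 *	+ sizeof(auth_setup)          : state1 - inner (ipad) midstate,
 *	                                padded to 8 bytes (24 for SHA-1)
 *	inner_state2_offset           : state2 - outer (opad) midstate
 *
 * For GCM, state2 instead carries the GHASH key H followed by the
 * big-endian AAD length written above; for Snow3G and Kasumi it holds
 * the raw or derived keys the firmware needs.
 */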
static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	PMD_INIT_FUNC_TRACE();
	rte_memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header, ICP_QAT_FW_LA_NO_PROTO);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
	struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	PMD_INIT_FUNC_TRACE();
	qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}
void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
	struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	PMD_INIT_FUNC_TRACE();
	qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_KASUMI_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;