/*
 *   This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   Contact Information:
 *   qat-linux@intel.com
 *
 *   BSD LICENSE
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
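
/*
 * Note: OpenSSL is used at session-setup time only. The QAT hardware
 * resumes hashing from a saved intermediate state, so the HMAC ipad/opad
 * partial hashes and the AES-derived GCM/XCBC constants are computed once
 * in software here and embedded in the session's content descriptor.
 */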
/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
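
/*
 * Worked example for the rounding above (a sketch, assuming the usual
 * 20-byte ICP_QAT_HW_SHA1_STATE1_SZ and 8-byte QAT_HW_DEFAULT_ALIGNMENT):
 * the SHA-1 digest is 20 bytes, so state1 is rounded up to the next
 * quadword boundary, giving a 24-byte state1 region.
 */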
/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
	return 0;
}
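
/*
 * The partial_hash_* helpers above intentionally skip the Final() step:
 * they run a single compression-function call (xxx_Transform) over one
 * input block and then copy the raw OpenSSL context out. Since each CTX
 * begins with its chaining variables, this captures the intermediate
 * hash state the hardware needs to continue the hash, rather than a
 * finalized (padded and length-appended) digest.
 */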
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
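
/*
 * Byte-order note: OpenSSL keeps the chaining variables in host order,
 * while QAT expects the saved state big-endian; hence the per-word
 * rte_bswap32 (SHA-1/224/256) and rte_bswap64 (SHA-384/512) above.
 * The MD5 state needs no swap and is copied straight to data_out.
 */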
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
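
/*
 * HMAC refresher (RFC 2104): HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 * For the plain hash algorithms, qat_alg_do_precomputes() below stores the
 * one-block partial hash of (K ^ ipad) as state1 and of (K ^ opad) as
 * state2, so per request the hardware only continues the inner hash over
 * the message and applies one outer compression.
 */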
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	PMD_INIT_FUNC_TRACE();
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		static uint8_t qat_aes_xcbc_key_seed[
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		int x;
		AES_KEY enc_key;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
		if (in == NULL)
			return -ENOMEM;
		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
				rte_free(in -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
				memset(out -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				return -EFAULT;
			}
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
		return 0;
	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL)
			return -ENOMEM;
		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
			&enc_key) != 0) {
			rte_free(in);
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size <= 0)
		return -EFAULT;
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}
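
/*
 * For the two non-HMAC branches above: AES-XCBC-MAC state2 holds the three
 * RFC 3566 subkeys K1 = E_K(0x01..01), K2 = E_K(0x02..02), K3 = E_K(0x03..03),
 * produced by encrypting the constant seed blocks with the auth key; GCM/GMAC
 * state2 holds the GHASH hash key H = E_K(0^128) followed by the (initially
 * zeroed) len(A) and E(K, Y0) fields, with the AAD length written into the
 * len(A) area later during auth session setup.
 */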
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
					uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				proto);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t cipher_offset, cd_size;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher->aes.cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);
	memcpy(cipher->aes.key, cipherkey, cipherkeylen);

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
			cipherkeylen;
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size-cipherkeylen;

		memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
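
/*
 * Resulting cipher part of the content descriptor (all offsets and sizes
 * are given to the firmware in 8-byte quadwords, hence the ">> 3" shifts):
 *
 *	cd_paddr -> +--------------------------------------+
 *		    | icp_qat_hw_cipher_config             |
 *		    | cipher key, zero-padded up to        |
 *		    |   total_key_size (key + IV for UEA2) |
 *		    +--------------------------------------+ <- cd_cur_ptr
 */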
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t add_auth_data_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t state1_size = 0, state2_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;

	PMD_INIT_FUNC_TRACE();
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_CMP_AUTH_RES);
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	}
	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
		hash->auth_counter.counter = 0;
	else
		hash->auth_counter.counter = rte_bswap32(
				qat_hash_get_block_size(cdesc->qat_hash_alg));

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		proto = ICP_QAT_FW_LA_GCM_PROTO;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(add_auth_data_length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->aes.cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->aes.key, authkey, authkeylen);
		memset(cipherconfig->aes.key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}
	/* Request template setup */
	qat_alg_init_common_hdr(header, proto);
	header->service_cmd_id = cdesc->qat_cmd;

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size;
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
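
/*
 * Worked example for inner_state2_offset (a sketch, assuming a 16-byte
 * icp_qat_hw_auth_setup): for HMAC-SHA1, state1 is 20 bytes rounded up
 * to 24, so state2 (the opad partial hash) begins (16 + 24) >> 3 = 5
 * quadwords after hash_cfg_offset, immediately after the aligned state1
 * (ipad partial hash) region.
 */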
static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
				struct icp_qat_hw_cipher_algo_blk *cd,
				const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	PMD_INIT_FUNC_TRACE();
	rte_memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header, ICP_QAT_FW_LA_NO_PROTO);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
	struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	PMD_INIT_FUNC_TRACE();
	qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}

void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
	struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	PMD_INIT_FUNC_TRACE();
	qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}
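
/*
 * The encrypt and decrypt template builders above share
 * qat_alg_ablkcipher_init_com() and differ only in the direction encoded
 * into cipher_config.val (QAT_AES_HW_CONFIG_CBC_ENC vs _CBC_DEC).
 */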
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EFAULT;
	}
	return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;