/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   Contact Information:
 *
 *   BSD LICENSE
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
65 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
66 * This is digest size rounded up to nearest quadword
68 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
70 switch (qat_hash_alg) {
71 case ICP_QAT_HW_AUTH_ALGO_SHA1:
72 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
73 QAT_HW_DEFAULT_ALIGNMENT);
74 case ICP_QAT_HW_AUTH_ALGO_SHA256:
75 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
76 QAT_HW_DEFAULT_ALIGNMENT);
77 case ICP_QAT_HW_AUTH_ALGO_SHA512:
78 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
79 QAT_HW_DEFAULT_ALIGNMENT);
80 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
81 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
82 QAT_HW_DEFAULT_ALIGNMENT);
83 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
84 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
85 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
86 QAT_HW_DEFAULT_ALIGNMENT);
87 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
88 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
89 QAT_HW_DEFAULT_ALIGNMENT);
90 case ICP_QAT_HW_AUTH_ALGO_MD5:
91 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
92 QAT_HW_DEFAULT_ALIGNMENT);
93 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
94 /* return maximum state1 size in this case */
95 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
96 QAT_HW_DEFAULT_ALIGNMENT);
98 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
104 /* returns digest size in bytes per hash algo */
105 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
107 switch (qat_hash_alg) {
108 case ICP_QAT_HW_AUTH_ALGO_SHA1:
109 return ICP_QAT_HW_SHA1_STATE1_SZ;
110 case ICP_QAT_HW_AUTH_ALGO_SHA256:
111 return ICP_QAT_HW_SHA256_STATE1_SZ;
112 case ICP_QAT_HW_AUTH_ALGO_SHA512:
113 return ICP_QAT_HW_SHA512_STATE1_SZ;
114 case ICP_QAT_HW_AUTH_ALGO_MD5:
115 return ICP_QAT_HW_MD5_STATE1_SZ;
116 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
117 /* return maximum digest size in this case */
118 return ICP_QAT_HW_SHA512_STATE1_SZ;
120 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
126 /* returns block size in byes per hash algo */
127 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
129 switch (qat_hash_alg) {
130 case ICP_QAT_HW_AUTH_ALGO_SHA1:
132 case ICP_QAT_HW_AUTH_ALGO_SHA256:
133 return SHA256_CBLOCK;
134 case ICP_QAT_HW_AUTH_ALGO_SHA512:
135 return SHA512_CBLOCK;
136 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
138 case ICP_QAT_HW_AUTH_ALGO_MD5:
140 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
141 /* return maximum block size in this case */
142 return SHA512_CBLOCK;
144 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
150 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
154 if (!SHA1_Init(&ctx))
156 SHA1_Transform(&ctx, data_in);
157 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
161 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
165 if (!SHA256_Init(&ctx))
167 SHA256_Transform(&ctx, data_in);
168 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
172 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
176 if (!SHA512_Init(&ctx))
178 SHA512_Transform(&ctx, data_in);
179 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
183 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
189 MD5_Transform(&ctx, data_in);
190 rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
195 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
200 uint8_t digest[qat_hash_get_digest_size(
201 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
202 uint32_t *hash_state_out_be32;
203 uint64_t *hash_state_out_be64;
206 PMD_INIT_FUNC_TRACE();
207 digest_size = qat_hash_get_digest_size(hash_alg);
208 if (digest_size <= 0)
211 hash_state_out_be32 = (uint32_t *)data_out;
212 hash_state_out_be64 = (uint64_t *)data_out;
215 case ICP_QAT_HW_AUTH_ALGO_SHA1:
216 if (partial_hash_sha1(data_in, digest))
218 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
219 *hash_state_out_be32 =
220 rte_bswap32(*(((uint32_t *)digest)+i));
222 case ICP_QAT_HW_AUTH_ALGO_SHA256:
223 if (partial_hash_sha256(data_in, digest))
225 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
226 *hash_state_out_be32 =
227 rte_bswap32(*(((uint32_t *)digest)+i));
229 case ICP_QAT_HW_AUTH_ALGO_SHA512:
230 if (partial_hash_sha512(data_in, digest))
232 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
233 *hash_state_out_be64 =
234 rte_bswap64(*(((uint64_t *)digest)+i));
236 case ICP_QAT_HW_AUTH_ALGO_MD5:
237 if (partial_hash_md5(data_in, data_out))
241 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
247 #define HMAC_IPAD_VALUE 0x36
248 #define HMAC_OPAD_VALUE 0x5c
249 #define HASH_XCBC_PRECOMP_KEY_NUM 3
251 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
252 const uint8_t *auth_key,
253 uint16_t auth_keylen,
254 uint8_t *p_state_buf,
255 uint16_t *p_state_len)
258 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
259 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
262 PMD_INIT_FUNC_TRACE();
263 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
264 static uint8_t qat_aes_xcbc_key_seed[
265 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
266 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
267 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
268 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
269 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
270 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
271 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
275 uint8_t *out = p_state_buf;
279 in = rte_zmalloc("working mem for key",
280 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
281 rte_memcpy(in, qat_aes_xcbc_key_seed,
282 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
283 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
284 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
287 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
289 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
290 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
293 AES_encrypt(in, out, &enc_key);
294 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
295 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
297 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
298 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
300 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
301 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
303 uint8_t *out = p_state_buf;
306 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
307 ICP_QAT_HW_GALOIS_LEN_A_SZ +
308 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
309 in = rte_zmalloc("working mem for key",
310 ICP_QAT_HW_GALOIS_H_SZ, 16);
311 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
312 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
316 AES_encrypt(in, out, &enc_key);
317 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
318 ICP_QAT_HW_GALOIS_LEN_A_SZ +
319 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
324 block_size = qat_hash_get_block_size(hash_alg);
327 /* init ipad and opad from key and xor with fixed values */
328 memset(ipad, 0, block_size);
329 memset(opad, 0, block_size);
331 if (auth_keylen > (unsigned int)block_size) {
332 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
335 rte_memcpy(ipad, auth_key, auth_keylen);
336 rte_memcpy(opad, auth_key, auth_keylen);
338 for (i = 0; i < block_size; i++) {
339 uint8_t *ipad_ptr = ipad + i;
340 uint8_t *opad_ptr = opad + i;
341 *ipad_ptr ^= HMAC_IPAD_VALUE;
342 *opad_ptr ^= HMAC_OPAD_VALUE;
345 /* do partial hash of ipad and copy to state1 */
346 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
347 memset(ipad, 0, block_size);
348 memset(opad, 0, block_size);
349 PMD_DRV_LOG(ERR, "ipad precompute failed");
354 * State len is a multiple of 8, so may be larger than the digest.
355 * Put the partial hash of opad state_len bytes after state1
357 *p_state_len = qat_hash_get_state1_size(hash_alg);
358 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
359 memset(ipad, 0, block_size);
360 memset(opad, 0, block_size);
361 PMD_DRV_LOG(ERR, "opad precompute failed");
365 /* don't leave data lying around */
366 memset(ipad, 0, block_size);
367 memset(opad, 0, block_size);
371 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
374 PMD_INIT_FUNC_TRACE();
376 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
377 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
378 header->comn_req_flags =
379 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
380 QAT_COMN_PTR_TYPE_FLAT);
381 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
382 ICP_QAT_FW_LA_PARTIAL_NONE);
383 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
384 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
385 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
387 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
388 ICP_QAT_FW_LA_NO_UPDATE_STATE);
391 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
393 uint32_t cipherkeylen)
395 struct icp_qat_hw_cipher_algo_blk *cipher;
396 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
397 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
398 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
399 void *ptr = &req_tmpl->cd_ctrl;
400 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
401 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
402 enum icp_qat_hw_cipher_convert key_convert;
403 uint32_t total_key_size;
404 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
405 uint16_t cipher_offset, cd_size;
407 PMD_INIT_FUNC_TRACE();
409 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
410 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
411 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
412 ICP_QAT_FW_SLICE_CIPHER);
413 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
414 ICP_QAT_FW_SLICE_DRAM_WR);
415 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
416 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
417 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
418 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
419 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
420 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
421 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
422 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
423 ICP_QAT_FW_SLICE_CIPHER);
424 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
425 ICP_QAT_FW_SLICE_AUTH);
426 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
427 ICP_QAT_FW_SLICE_AUTH);
428 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
429 ICP_QAT_FW_SLICE_DRAM_WR);
430 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
431 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
432 PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
436 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
438 * CTR Streaming ciphers are a special case. Decrypt = encrypt
439 * Overriding default values previously set
441 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
442 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
443 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
444 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
445 else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
446 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
448 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
450 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
451 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
452 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
453 cipher_cd_ctrl->cipher_state_sz =
454 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
455 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
457 total_key_size = cipherkeylen;
458 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
459 proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
461 cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
462 cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
463 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
465 header->service_cmd_id = cdesc->qat_cmd;
466 qat_alg_init_common_hdr(header, proto);
468 cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
469 cipher->aes.cipher_config.val =
470 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
471 cdesc->qat_cipher_alg, key_convert,
473 memcpy(cipher->aes.key, cipherkey, cipherkeylen);
474 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
476 if (total_key_size > cipherkeylen) {
477 uint32_t padding_size = total_key_size-cipherkeylen;
479 memset(cdesc->cd_cur_ptr, 0, padding_size);
480 cdesc->cd_cur_ptr += padding_size;
482 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
483 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
488 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
491 uint32_t add_auth_data_length,
493 unsigned int operation)
495 struct icp_qat_hw_auth_setup *hash;
496 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
497 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
498 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
499 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
500 void *ptr = &req_tmpl->cd_ctrl;
501 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
502 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
503 struct icp_qat_fw_la_auth_req_params *auth_param =
504 (struct icp_qat_fw_la_auth_req_params *)
505 ((char *)&req_tmpl->serv_specif_rqpars +
506 sizeof(struct icp_qat_fw_la_cipher_req_params));
507 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
508 uint16_t state1_size = 0, state2_size = 0;
509 uint16_t hash_offset, cd_size;
510 uint32_t *aad_len = NULL;
512 PMD_INIT_FUNC_TRACE();
514 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
515 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
516 ICP_QAT_FW_SLICE_AUTH);
517 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
518 ICP_QAT_FW_SLICE_DRAM_WR);
519 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
520 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
521 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
522 ICP_QAT_FW_SLICE_AUTH);
523 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
524 ICP_QAT_FW_SLICE_CIPHER);
525 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
526 ICP_QAT_FW_SLICE_CIPHER);
527 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
528 ICP_QAT_FW_SLICE_DRAM_WR);
529 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
530 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
531 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
535 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
536 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
537 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
538 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
539 ICP_QAT_FW_LA_CMP_AUTH_RES);
541 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
542 ICP_QAT_FW_LA_RET_AUTH_RES);
543 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
544 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
548 * Setup the inner hash config
550 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
551 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
552 hash->auth_config.reserved = 0;
553 hash->auth_config.config =
554 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
555 cdesc->qat_hash_alg, digestsize);
557 if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
558 hash->auth_counter.counter = 0;
560 hash->auth_counter.counter = rte_bswap32(
561 qat_hash_get_block_size(cdesc->qat_hash_alg));
563 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
566 * cd_cur_ptr now points at the state1 information.
568 switch (cdesc->qat_hash_alg) {
569 case ICP_QAT_HW_AUTH_ALGO_SHA1:
570 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
571 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
572 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
575 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
577 case ICP_QAT_HW_AUTH_ALGO_SHA256:
578 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
579 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
580 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
583 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
585 case ICP_QAT_HW_AUTH_ALGO_SHA512:
586 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
587 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
588 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
591 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
593 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
594 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
595 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
596 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
598 PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
602 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
603 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
604 proto = ICP_QAT_FW_LA_GCM_PROTO;
605 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
606 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
607 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
609 PMD_DRV_LOG(ERR, "(GCM)precompute failed");
613 * Write (the length of AAD) into bytes 16-19 of state2
614 * in big-endian format. This field is 8 bytes
616 auth_param->u2.aad_sz =
617 RTE_ALIGN_CEIL(add_auth_data_length, 16);
618 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
620 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
621 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
622 ICP_QAT_HW_GALOIS_H_SZ);
623 *aad_len = rte_bswap32(add_auth_data_length);
625 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
626 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
627 state1_size = qat_hash_get_state1_size(
628 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
629 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
630 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
632 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
633 (cdesc->cd_cur_ptr + state1_size + state2_size);
634 cipherconfig->aes.cipher_config.val =
635 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
636 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
637 ICP_QAT_HW_CIPHER_KEY_CONVERT,
638 ICP_QAT_HW_CIPHER_ENCRYPT);
639 memcpy(cipherconfig->aes.key, authkey, authkeylen);
640 memset(cipherconfig->aes.key + authkeylen,
641 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
642 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
643 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
644 auth_param->hash_state_sz =
645 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
647 case ICP_QAT_HW_AUTH_ALGO_MD5:
648 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
649 authkey, authkeylen, cdesc->cd_cur_ptr,
651 PMD_DRV_LOG(ERR, "(MD5)precompute failed");
654 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
657 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
661 /* Request template setup */
662 qat_alg_init_common_hdr(header, proto);
663 header->service_cmd_id = cdesc->qat_cmd;
665 /* Auth CD config setup */
666 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
667 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
668 hash_cd_ctrl->inner_res_sz = digestsize;
669 hash_cd_ctrl->final_sz = digestsize;
670 hash_cd_ctrl->inner_state1_sz = state1_size;
671 auth_param->auth_res_sz = digestsize;
673 hash_cd_ctrl->inner_state2_sz = state2_size;
674 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
675 ((sizeof(struct icp_qat_hw_auth_setup) +
676 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
679 cdesc->cd_cur_ptr += state1_size + state2_size;
680 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
682 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
683 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
688 static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
689 struct icp_qat_hw_cipher_algo_blk *cd,
690 const uint8_t *key, unsigned int keylen)
692 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
693 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
694 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
696 PMD_INIT_FUNC_TRACE();
697 rte_memcpy(cd->aes.key, key, keylen);
698 qat_alg_init_common_hdr(header, ICP_QAT_FW_LA_NO_PROTO);
699 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
700 cd_pars->u.s.content_desc_params_sz =
701 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
702 /* Cipher CD config setup */
703 cd_ctrl->cipher_key_sz = keylen >> 3;
704 cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
705 cd_ctrl->cipher_cfg_offset = 0;
706 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
707 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
710 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
711 int alg, const uint8_t *key,
714 struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
715 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
716 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
718 PMD_INIT_FUNC_TRACE();
719 qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
720 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
721 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
724 void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
725 int alg, const uint8_t *key,
728 struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
729 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
730 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
732 PMD_INIT_FUNC_TRACE();
733 qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
734 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
735 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
738 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
741 case ICP_QAT_HW_AES_128_KEY_SZ:
742 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
744 case ICP_QAT_HW_AES_192_KEY_SZ:
745 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
747 case ICP_QAT_HW_AES_256_KEY_SZ:
748 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
756 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
759 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
760 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;