2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
6 * Copyright(c) 2015-2016 Intel Corporation.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * Contact Information:
20 * Copyright(c) 2015-2016 Intel Corporation.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <string.h>

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
64 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
65 * This is digest size rounded up to nearest quadword
67 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
69 switch (qat_hash_alg) {
70 case ICP_QAT_HW_AUTH_ALGO_SHA1:
71 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
72 QAT_HW_DEFAULT_ALIGNMENT);
73 case ICP_QAT_HW_AUTH_ALGO_SHA256:
74 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
75 QAT_HW_DEFAULT_ALIGNMENT);
76 case ICP_QAT_HW_AUTH_ALGO_SHA512:
77 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
78 QAT_HW_DEFAULT_ALIGNMENT);
79 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
80 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
81 QAT_HW_DEFAULT_ALIGNMENT);
82 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
83 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
84 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
85 QAT_HW_DEFAULT_ALIGNMENT);
86 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
87 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
88 QAT_HW_DEFAULT_ALIGNMENT);
89 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
90 /* return maximum state1 size in this case */
91 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
92 QAT_HW_DEFAULT_ALIGNMENT);
94 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
100 /* returns digest size in bytes per hash algo */
101 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
103 switch (qat_hash_alg) {
104 case ICP_QAT_HW_AUTH_ALGO_SHA1:
105 return ICP_QAT_HW_SHA1_STATE1_SZ;
106 case ICP_QAT_HW_AUTH_ALGO_SHA256:
107 return ICP_QAT_HW_SHA256_STATE1_SZ;
108 case ICP_QAT_HW_AUTH_ALGO_SHA512:
109 return ICP_QAT_HW_SHA512_STATE1_SZ;
110 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
111 /* return maximum digest size in this case */
112 return ICP_QAT_HW_SHA512_STATE1_SZ;
114 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
120 /* returns block size in byes per hash algo */
121 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
123 switch (qat_hash_alg) {
124 case ICP_QAT_HW_AUTH_ALGO_SHA1:
126 case ICP_QAT_HW_AUTH_ALGO_SHA256:
127 return SHA256_CBLOCK;
128 case ICP_QAT_HW_AUTH_ALGO_SHA512:
129 return SHA512_CBLOCK;
130 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
132 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
133 /* return maximum block size in this case */
134 return SHA512_CBLOCK;
136 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
142 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
146 if (!SHA1_Init(&ctx))
148 SHA1_Transform(&ctx, data_in);
149 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
153 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
157 if (!SHA256_Init(&ctx))
159 SHA256_Transform(&ctx, data_in);
160 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
164 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
168 if (!SHA512_Init(&ctx))
170 SHA512_Transform(&ctx, data_in);
171 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
175 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
180 uint8_t digest[qat_hash_get_digest_size(
181 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
182 uint32_t *hash_state_out_be32;
183 uint64_t *hash_state_out_be64;
186 PMD_INIT_FUNC_TRACE();
187 digest_size = qat_hash_get_digest_size(hash_alg);
188 if (digest_size <= 0)
191 hash_state_out_be32 = (uint32_t *)data_out;
192 hash_state_out_be64 = (uint64_t *)data_out;
195 case ICP_QAT_HW_AUTH_ALGO_SHA1:
196 if (partial_hash_sha1(data_in, digest))
198 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
199 *hash_state_out_be32 =
200 rte_bswap32(*(((uint32_t *)digest)+i));
202 case ICP_QAT_HW_AUTH_ALGO_SHA256:
203 if (partial_hash_sha256(data_in, digest))
205 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
206 *hash_state_out_be32 =
207 rte_bswap32(*(((uint32_t *)digest)+i));
209 case ICP_QAT_HW_AUTH_ALGO_SHA512:
210 if (partial_hash_sha512(data_in, digest))
212 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
213 *hash_state_out_be64 =
214 rte_bswap64(*(((uint64_t *)digest)+i));
217 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
223 #define HMAC_IPAD_VALUE 0x36
224 #define HMAC_OPAD_VALUE 0x5c
225 #define HASH_XCBC_PRECOMP_KEY_NUM 3
227 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
228 const uint8_t *auth_key,
229 uint16_t auth_keylen,
230 uint8_t *p_state_buf,
231 uint16_t *p_state_len)
234 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
235 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
238 PMD_INIT_FUNC_TRACE();
239 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
240 static uint8_t qat_aes_xcbc_key_seed[
241 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
242 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
243 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
244 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
245 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
246 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
247 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
251 uint8_t *out = p_state_buf;
255 in = rte_zmalloc("working mem for key",
256 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
257 rte_memcpy(in, qat_aes_xcbc_key_seed,
258 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
259 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
260 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
263 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
265 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
266 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
269 AES_encrypt(in, out, &enc_key);
270 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
271 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
273 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
274 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
276 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
277 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
279 uint8_t *out = p_state_buf;
282 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
283 ICP_QAT_HW_GALOIS_LEN_A_SZ +
284 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
285 in = rte_zmalloc("working mem for key",
286 ICP_QAT_HW_GALOIS_H_SZ, 16);
287 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
288 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
292 AES_encrypt(in, out, &enc_key);
293 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
294 ICP_QAT_HW_GALOIS_LEN_A_SZ +
295 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
300 block_size = qat_hash_get_block_size(hash_alg);
303 /* init ipad and opad from key and xor with fixed values */
304 memset(ipad, 0, block_size);
305 memset(opad, 0, block_size);
307 if (auth_keylen > (unsigned int)block_size) {
308 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
311 rte_memcpy(ipad, auth_key, auth_keylen);
312 rte_memcpy(opad, auth_key, auth_keylen);
314 for (i = 0; i < block_size; i++) {
315 uint8_t *ipad_ptr = ipad + i;
316 uint8_t *opad_ptr = opad + i;
317 *ipad_ptr ^= HMAC_IPAD_VALUE;
318 *opad_ptr ^= HMAC_OPAD_VALUE;
321 /* do partial hash of ipad and copy to state1 */
322 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
323 memset(ipad, 0, block_size);
324 memset(opad, 0, block_size);
325 PMD_DRV_LOG(ERR, "ipad precompute failed");
330 * State len is a multiple of 8, so may be larger than the digest.
331 * Put the partial hash of opad state_len bytes after state1
333 *p_state_len = qat_hash_get_state1_size(hash_alg);
334 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
335 memset(ipad, 0, block_size);
336 memset(opad, 0, block_size);
337 PMD_DRV_LOG(ERR, "opad precompute failed");
341 /* don't leave data lying around */
342 memset(ipad, 0, block_size);
343 memset(opad, 0, block_size);
347 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
350 PMD_INIT_FUNC_TRACE();
352 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
353 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
354 header->comn_req_flags =
355 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
356 QAT_COMN_PTR_TYPE_FLAT);
357 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
358 ICP_QAT_FW_LA_PARTIAL_NONE);
359 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
360 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
361 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
363 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
364 ICP_QAT_FW_LA_NO_UPDATE_STATE);
367 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
369 uint32_t cipherkeylen)
371 struct icp_qat_hw_cipher_algo_blk *cipher;
372 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
373 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
374 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
375 void *ptr = &req_tmpl->cd_ctrl;
376 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
377 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
378 enum icp_qat_hw_cipher_convert key_convert;
379 uint32_t total_key_size;
380 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
381 uint16_t cipher_offset, cd_size;
383 PMD_INIT_FUNC_TRACE();
385 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
386 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
387 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
388 ICP_QAT_FW_SLICE_CIPHER);
389 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
390 ICP_QAT_FW_SLICE_DRAM_WR);
391 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
392 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
393 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
394 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
395 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
396 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
397 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
398 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
399 ICP_QAT_FW_SLICE_CIPHER);
400 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
401 ICP_QAT_FW_SLICE_AUTH);
402 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
403 ICP_QAT_FW_SLICE_AUTH);
404 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
405 ICP_QAT_FW_SLICE_DRAM_WR);
406 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
407 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
408 PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
412 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
414 * CTR Streaming ciphers are a special case. Decrypt = encrypt
415 * Overriding default values previously set
417 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
418 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
419 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
420 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
421 else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
422 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
424 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
426 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
427 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
428 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
429 cipher_cd_ctrl->cipher_state_sz =
430 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
431 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
433 total_key_size = cipherkeylen;
434 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
435 proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
437 cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
438 cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
439 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
441 header->service_cmd_id = cdesc->qat_cmd;
442 qat_alg_init_common_hdr(header, proto);
444 cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
445 cipher->aes.cipher_config.val =
446 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
447 cdesc->qat_cipher_alg, key_convert,
449 memcpy(cipher->aes.key, cipherkey, cipherkeylen);
450 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
452 if (total_key_size > cipherkeylen) {
453 uint32_t padding_size = total_key_size-cipherkeylen;
455 memset(cdesc->cd_cur_ptr, 0, padding_size);
456 cdesc->cd_cur_ptr += padding_size;
458 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
459 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
464 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
467 uint32_t add_auth_data_length,
469 unsigned int operation)
471 struct icp_qat_hw_auth_setup *hash;
472 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
473 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
474 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
475 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
476 void *ptr = &req_tmpl->cd_ctrl;
477 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
478 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
479 struct icp_qat_fw_la_auth_req_params *auth_param =
480 (struct icp_qat_fw_la_auth_req_params *)
481 ((char *)&req_tmpl->serv_specif_rqpars +
482 sizeof(struct icp_qat_fw_la_cipher_req_params));
483 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
484 uint16_t state1_size = 0, state2_size = 0;
485 uint16_t hash_offset, cd_size;
486 uint32_t *aad_len = NULL;
488 PMD_INIT_FUNC_TRACE();
490 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
491 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
492 ICP_QAT_FW_SLICE_AUTH);
493 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
494 ICP_QAT_FW_SLICE_DRAM_WR);
495 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
496 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
497 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
498 ICP_QAT_FW_SLICE_AUTH);
499 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
500 ICP_QAT_FW_SLICE_CIPHER);
501 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
502 ICP_QAT_FW_SLICE_CIPHER);
503 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
504 ICP_QAT_FW_SLICE_DRAM_WR);
505 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
506 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
507 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
511 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
512 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
513 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
514 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
515 ICP_QAT_FW_LA_CMP_AUTH_RES);
517 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
518 ICP_QAT_FW_LA_RET_AUTH_RES);
519 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
520 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
524 * Setup the inner hash config
526 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
527 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
528 hash->auth_config.reserved = 0;
529 hash->auth_config.config =
530 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
531 cdesc->qat_hash_alg, digestsize);
533 if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
534 hash->auth_counter.counter = 0;
536 hash->auth_counter.counter = rte_bswap32(
537 qat_hash_get_block_size(cdesc->qat_hash_alg));
539 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
542 * cd_cur_ptr now points at the state1 information.
544 switch (cdesc->qat_hash_alg) {
545 case ICP_QAT_HW_AUTH_ALGO_SHA1:
546 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
547 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
548 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
551 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
553 case ICP_QAT_HW_AUTH_ALGO_SHA256:
554 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
555 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
556 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
559 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
561 case ICP_QAT_HW_AUTH_ALGO_SHA512:
562 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
563 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
564 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
567 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
569 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
570 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
571 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
572 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
574 PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
578 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
579 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
580 proto = ICP_QAT_FW_LA_GCM_PROTO;
581 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
582 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
583 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
585 PMD_DRV_LOG(ERR, "(GCM)precompute failed");
589 * Write (the length of AAD) into bytes 16-19 of state2
590 * in big-endian format. This field is 8 bytes
592 auth_param->u2.aad_sz =
593 RTE_ALIGN_CEIL(add_auth_data_length, 16);
594 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
596 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
597 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
598 ICP_QAT_HW_GALOIS_H_SZ);
599 *aad_len = rte_bswap32(add_auth_data_length);
601 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
602 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
603 state1_size = qat_hash_get_state1_size(
604 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
605 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
606 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
608 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
609 (cdesc->cd_cur_ptr + state1_size + state2_size);
610 cipherconfig->aes.cipher_config.val =
611 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
612 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
613 ICP_QAT_HW_CIPHER_KEY_CONVERT,
614 ICP_QAT_HW_CIPHER_ENCRYPT);
615 memcpy(cipherconfig->aes.key, authkey, authkeylen);
616 memset(cipherconfig->aes.key + authkeylen,
617 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
618 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
619 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
620 auth_param->hash_state_sz =
621 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
624 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
628 /* Request template setup */
629 qat_alg_init_common_hdr(header, proto);
630 header->service_cmd_id = cdesc->qat_cmd;
632 /* Auth CD config setup */
633 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
634 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
635 hash_cd_ctrl->inner_res_sz = digestsize;
636 hash_cd_ctrl->final_sz = digestsize;
637 hash_cd_ctrl->inner_state1_sz = state1_size;
638 auth_param->auth_res_sz = digestsize;
640 hash_cd_ctrl->inner_state2_sz = state2_size;
641 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
642 ((sizeof(struct icp_qat_hw_auth_setup) +
643 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
646 cdesc->cd_cur_ptr += state1_size + state2_size;
647 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
649 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
650 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
655 static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
656 struct icp_qat_hw_cipher_algo_blk *cd,
657 const uint8_t *key, unsigned int keylen)
659 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
660 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
661 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
663 PMD_INIT_FUNC_TRACE();
664 rte_memcpy(cd->aes.key, key, keylen);
665 qat_alg_init_common_hdr(header, ICP_QAT_FW_LA_NO_PROTO);
666 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
667 cd_pars->u.s.content_desc_params_sz =
668 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
669 /* Cipher CD config setup */
670 cd_ctrl->cipher_key_sz = keylen >> 3;
671 cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
672 cd_ctrl->cipher_cfg_offset = 0;
673 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
674 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
677 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
678 int alg, const uint8_t *key,
681 struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
682 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
683 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
685 PMD_INIT_FUNC_TRACE();
686 qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
687 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
688 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
691 void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
692 int alg, const uint8_t *key,
695 struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
696 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
697 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
699 PMD_INIT_FUNC_TRACE();
700 qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
701 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
702 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
705 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
708 case ICP_QAT_HW_AES_128_KEY_SZ:
709 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
711 case ICP_QAT_HW_AES_192_KEY_SZ:
712 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
714 case ICP_QAT_HW_AES_256_KEY_SZ:
715 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
723 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
726 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
727 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;