2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
6 * Copyright(c) 2015-2016 Intel Corporation.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * Contact Information:
20 * Copyright(c) 2015-2016 Intel Corporation.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <string.h>

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "../qat_logs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
63 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
64 * This is digest size rounded up to nearest quadword
66 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
68 switch (qat_hash_alg) {
69 case ICP_QAT_HW_AUTH_ALGO_SHA1:
70 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
71 QAT_HW_DEFAULT_ALIGNMENT);
72 case ICP_QAT_HW_AUTH_ALGO_SHA256:
73 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
74 QAT_HW_DEFAULT_ALIGNMENT);
75 case ICP_QAT_HW_AUTH_ALGO_SHA512:
76 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
77 QAT_HW_DEFAULT_ALIGNMENT);
78 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
79 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
80 QAT_HW_DEFAULT_ALIGNMENT);
81 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
82 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
83 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
84 QAT_HW_DEFAULT_ALIGNMENT);
85 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
86 /* return maximum state1 size in this case */
87 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
88 QAT_HW_DEFAULT_ALIGNMENT);
90 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
96 /* returns digest size in bytes per hash algo */
97 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
99 switch (qat_hash_alg) {
100 case ICP_QAT_HW_AUTH_ALGO_SHA1:
101 return ICP_QAT_HW_SHA1_STATE1_SZ;
102 case ICP_QAT_HW_AUTH_ALGO_SHA256:
103 return ICP_QAT_HW_SHA256_STATE1_SZ;
104 case ICP_QAT_HW_AUTH_ALGO_SHA512:
105 return ICP_QAT_HW_SHA512_STATE1_SZ;
106 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
107 /* return maximum digest size in this case */
108 return ICP_QAT_HW_SHA512_STATE1_SZ;
110 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
116 /* returns block size in byes per hash algo */
117 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
119 switch (qat_hash_alg) {
120 case ICP_QAT_HW_AUTH_ALGO_SHA1:
122 case ICP_QAT_HW_AUTH_ALGO_SHA256:
123 return SHA256_CBLOCK;
124 case ICP_QAT_HW_AUTH_ALGO_SHA512:
125 return SHA512_CBLOCK;
126 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
128 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
129 /* return maximum block size in this case */
130 return SHA512_CBLOCK;
132 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
138 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
142 if (!SHA1_Init(&ctx))
144 SHA1_Transform(&ctx, data_in);
145 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
149 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
153 if (!SHA256_Init(&ctx))
155 SHA256_Transform(&ctx, data_in);
156 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
160 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
164 if (!SHA512_Init(&ctx))
166 SHA512_Transform(&ctx, data_in);
167 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
171 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
176 uint8_t digest[qat_hash_get_digest_size(
177 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
178 uint32_t *hash_state_out_be32;
179 uint64_t *hash_state_out_be64;
182 PMD_INIT_FUNC_TRACE();
183 digest_size = qat_hash_get_digest_size(hash_alg);
184 if (digest_size <= 0)
187 hash_state_out_be32 = (uint32_t *)data_out;
188 hash_state_out_be64 = (uint64_t *)data_out;
191 case ICP_QAT_HW_AUTH_ALGO_SHA1:
192 if (partial_hash_sha1(data_in, digest))
194 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
195 *hash_state_out_be32 =
196 rte_bswap32(*(((uint32_t *)digest)+i));
198 case ICP_QAT_HW_AUTH_ALGO_SHA256:
199 if (partial_hash_sha256(data_in, digest))
201 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
202 *hash_state_out_be32 =
203 rte_bswap32(*(((uint32_t *)digest)+i));
205 case ICP_QAT_HW_AUTH_ALGO_SHA512:
206 if (partial_hash_sha512(data_in, digest))
208 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
209 *hash_state_out_be64 =
210 rte_bswap64(*(((uint64_t *)digest)+i));
213 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
/* HMAC inner/outer pad bytes as defined in RFC 2104 */
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
/* Number of derived keys (K1, K2, K3) precomputed for AES-XCBC-MAC */
#define HASH_XCBC_PRECOMP_KEY_NUM 3
223 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
224 const uint8_t *auth_key,
225 uint16_t auth_keylen,
226 uint8_t *p_state_buf,
227 uint16_t *p_state_len)
230 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
231 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
234 PMD_INIT_FUNC_TRACE();
235 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
236 static uint8_t qat_aes_xcbc_key_seed[
237 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
238 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
239 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
240 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
241 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
242 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
243 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
247 uint8_t *out = p_state_buf;
251 in = rte_zmalloc("working mem for key",
252 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
253 rte_memcpy(in, qat_aes_xcbc_key_seed,
254 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
255 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
256 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
259 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
261 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
262 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
265 AES_encrypt(in, out, &enc_key);
266 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
267 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
269 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
270 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
272 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
273 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
275 uint8_t *out = p_state_buf;
278 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
279 ICP_QAT_HW_GALOIS_LEN_A_SZ +
280 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
281 in = rte_zmalloc("working mem for key",
282 ICP_QAT_HW_GALOIS_H_SZ, 16);
283 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
284 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
288 AES_encrypt(in, out, &enc_key);
289 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
290 ICP_QAT_HW_GALOIS_LEN_A_SZ +
291 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
296 block_size = qat_hash_get_block_size(hash_alg);
299 /* init ipad and opad from key and xor with fixed values */
300 memset(ipad, 0, block_size);
301 memset(opad, 0, block_size);
303 if (auth_keylen > (unsigned int)block_size) {
304 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
307 rte_memcpy(ipad, auth_key, auth_keylen);
308 rte_memcpy(opad, auth_key, auth_keylen);
310 for (i = 0; i < block_size; i++) {
311 uint8_t *ipad_ptr = ipad + i;
312 uint8_t *opad_ptr = opad + i;
313 *ipad_ptr ^= HMAC_IPAD_VALUE;
314 *opad_ptr ^= HMAC_OPAD_VALUE;
317 /* do partial hash of ipad and copy to state1 */
318 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
319 memset(ipad, 0, block_size);
320 memset(opad, 0, block_size);
321 PMD_DRV_LOG(ERR, "ipad precompute failed");
326 * State len is a multiple of 8, so may be larger than the digest.
327 * Put the partial hash of opad state_len bytes after state1
329 *p_state_len = qat_hash_get_state1_size(hash_alg);
330 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
331 memset(ipad, 0, block_size);
332 memset(opad, 0, block_size);
333 PMD_DRV_LOG(ERR, "opad precompute failed");
337 /* don't leave data lying around */
338 memset(ipad, 0, block_size);
339 memset(opad, 0, block_size);
343 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
345 PMD_INIT_FUNC_TRACE();
347 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
348 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
349 header->comn_req_flags =
350 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
351 QAT_COMN_PTR_TYPE_FLAT);
352 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
353 ICP_QAT_FW_LA_PARTIAL_NONE);
354 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
355 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
356 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
357 ICP_QAT_FW_LA_NO_PROTO);
358 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
359 ICP_QAT_FW_LA_NO_UPDATE_STATE);
362 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
364 uint32_t cipherkeylen)
366 struct icp_qat_hw_cipher_algo_blk *cipher;
367 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
368 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
369 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
370 void *ptr = &req_tmpl->cd_ctrl;
371 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
372 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
373 enum icp_qat_hw_cipher_convert key_convert;
374 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
375 uint16_t cipher_offset = 0;
377 PMD_INIT_FUNC_TRACE();
379 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
381 (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
382 sizeof(struct icp_qat_hw_auth_algo_blk));
383 cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
385 cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
389 if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
390 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
391 ICP_QAT_FW_LA_RET_AUTH_RES);
392 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
393 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
395 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
396 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
397 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
398 ICP_QAT_FW_LA_CMP_AUTH_RES);
401 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
402 /* CTR Streaming ciphers are a special case. Decrypt = encrypt
403 * Overriding default values previously set
405 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
406 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
407 } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
408 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
410 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
412 /* For Snow3G, set key convert and other bits */
413 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
414 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
415 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
416 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
417 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
418 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
421 cipher->aes.cipher_config.val =
422 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
423 cdesc->qat_cipher_alg, key_convert,
425 memcpy(cipher->aes.key, cipherkey, cipherkeylen);
427 proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
428 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
429 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
431 /* Request template setup */
432 qat_alg_init_common_hdr(header);
433 header->service_cmd_id = cdesc->qat_cmd;
435 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
436 ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
437 /* Configure the common header protocol flags */
438 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
439 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
440 cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
442 /* Cipher CD config setup */
443 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
444 cipher_cd_ctrl->cipher_key_sz =
445 (ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
446 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
447 cipher_cd_ctrl->cipher_state_sz =
448 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
449 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
451 cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
452 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
453 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
456 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
457 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
458 ICP_QAT_FW_SLICE_CIPHER);
459 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
460 ICP_QAT_FW_SLICE_DRAM_WR);
461 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
462 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
463 ICP_QAT_FW_SLICE_CIPHER);
464 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
465 ICP_QAT_FW_SLICE_AUTH);
466 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
467 ICP_QAT_FW_SLICE_AUTH);
468 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
469 ICP_QAT_FW_SLICE_DRAM_WR);
470 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
471 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
472 ICP_QAT_FW_SLICE_AUTH);
473 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
474 ICP_QAT_FW_SLICE_CIPHER);
475 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
476 ICP_QAT_FW_SLICE_CIPHER);
477 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
478 ICP_QAT_FW_SLICE_DRAM_WR);
480 PMD_DRV_LOG(ERR, "invalid param, only authenticated "
481 "encryption supported");
487 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
490 uint32_t add_auth_data_length,
493 struct icp_qat_hw_cipher_algo_blk *cipher;
494 struct icp_qat_hw_auth_algo_blk *hash;
495 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
496 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
497 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
498 void *ptr = &req_tmpl->cd_ctrl;
499 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
500 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
501 struct icp_qat_fw_la_auth_req_params *auth_param =
502 (struct icp_qat_fw_la_auth_req_params *)
503 ((char *)&req_tmpl->serv_specif_rqpars +
504 sizeof(struct icp_qat_fw_la_cipher_req_params));
505 enum icp_qat_hw_cipher_convert key_convert;
506 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
507 uint16_t state1_size = 0;
508 uint16_t state2_size = 0;
509 uint16_t cipher_offset = 0, hash_offset = 0;
511 PMD_INIT_FUNC_TRACE();
513 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
514 hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
516 (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
517 sizeof(struct icp_qat_hw_auth_algo_blk));
519 cipher_offset = ((char *)hash - (char *)cipher);
521 cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
522 hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
523 sizeof(struct icp_qat_hw_cipher_algo_blk));
525 hash_offset = ((char *)hash - (char *)cipher);
529 if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
530 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
531 ICP_QAT_FW_LA_RET_AUTH_RES);
532 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
533 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
535 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
536 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
537 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
538 ICP_QAT_FW_LA_CMP_AUTH_RES);
541 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
542 /* CTR Streaming ciphers are a special case. Decrypt = encrypt
543 * Overriding default values previously set
545 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
546 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
547 } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
548 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
550 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
552 cipher->aes.cipher_config.val =
553 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
554 cdesc->qat_cipher_alg, key_convert,
557 hash->sha.inner_setup.auth_config.reserved = 0;
558 hash->sha.inner_setup.auth_config.config =
559 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
560 cdesc->qat_hash_alg, digestsize);
561 hash->sha.inner_setup.auth_counter.counter =
562 rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
565 if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
566 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
567 authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
568 ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
569 PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
572 } else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
573 (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
574 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
575 authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
576 ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
577 PMD_DRV_LOG(ERR, "(GCM)precompute failed");
581 * Write (the length of AAD) into bytes 16-19 of state2
582 * in big-endian format. This field is 8 bytes
584 *(uint32_t *)&(hash->sha.state1[
585 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
586 ICP_QAT_HW_GALOIS_H_SZ]) =
587 rte_bswap32(add_auth_data_length);
588 proto = ICP_QAT_FW_LA_GCM_PROTO;
590 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
591 authkey, authkeylen, (uint8_t *)(hash->sha.state1),
593 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
598 /* Request template setup */
599 qat_alg_init_common_hdr(header);
600 header->service_cmd_id = cdesc->qat_cmd;
601 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
602 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
603 /* Configure the common header protocol flags */
604 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
605 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
606 cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
608 /* Cipher CD config setup */
609 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
610 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
612 /* Auth CD config setup */
613 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
614 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
615 hash_cd_ctrl->inner_res_sz = digestsize;
616 hash_cd_ctrl->final_sz = digestsize;
617 hash_cd_ctrl->inner_state1_sz = state1_size;
619 switch (cdesc->qat_hash_alg) {
620 case ICP_QAT_HW_AUTH_ALGO_SHA1:
621 hash_cd_ctrl->inner_state2_sz =
622 RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
624 case ICP_QAT_HW_AUTH_ALGO_SHA256:
625 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
627 case ICP_QAT_HW_AUTH_ALGO_SHA512:
628 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
630 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
631 hash_cd_ctrl->inner_state2_sz =
632 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
633 hash_cd_ctrl->inner_state1_sz =
634 ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
635 memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
637 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
638 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
639 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
640 ICP_QAT_HW_GALOIS_LEN_A_SZ +
641 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
642 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
643 memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
646 PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
650 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
651 ((sizeof(struct icp_qat_hw_auth_setup) +
652 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
654 auth_param->auth_res_sz = digestsize;
656 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
657 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
658 ICP_QAT_FW_SLICE_AUTH);
659 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
660 ICP_QAT_FW_SLICE_DRAM_WR);
661 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
662 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
663 ICP_QAT_FW_SLICE_CIPHER);
664 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
665 ICP_QAT_FW_SLICE_AUTH);
666 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
667 ICP_QAT_FW_SLICE_AUTH);
668 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
669 ICP_QAT_FW_SLICE_DRAM_WR);
670 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
671 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
672 ICP_QAT_FW_SLICE_AUTH);
673 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
674 ICP_QAT_FW_SLICE_CIPHER);
675 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
676 ICP_QAT_FW_SLICE_CIPHER);
677 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
678 ICP_QAT_FW_SLICE_DRAM_WR);
680 PMD_DRV_LOG(ERR, "invalid param, only authenticated "
681 "encryption supported");
687 static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
688 struct icp_qat_hw_cipher_algo_blk *cd,
689 const uint8_t *key, unsigned int keylen)
691 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
692 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
693 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
695 PMD_INIT_FUNC_TRACE();
696 rte_memcpy(cd->aes.key, key, keylen);
697 qat_alg_init_common_hdr(header);
698 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
699 cd_pars->u.s.content_desc_params_sz =
700 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
701 /* Cipher CD config setup */
702 cd_ctrl->cipher_key_sz = keylen >> 3;
703 cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
704 cd_ctrl->cipher_cfg_offset = 0;
705 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
706 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
709 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
710 int alg, const uint8_t *key,
713 struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
714 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
715 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
717 PMD_INIT_FUNC_TRACE();
718 qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
719 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
720 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
723 void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
724 int alg, const uint8_t *key,
727 struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
728 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
729 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
731 PMD_INIT_FUNC_TRACE();
732 qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
733 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
734 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
737 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
740 case ICP_QAT_HW_AES_128_KEY_SZ:
741 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
743 case ICP_QAT_HW_AES_192_KEY_SZ:
744 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
746 case ICP_QAT_HW_AES_256_KEY_SZ:
747 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;