2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
6 * Copyright(c) 2015-2016 Intel Corporation.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * Contact Information:
20 * Copyright(c) 2015-2016 Intel Corporation.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
56 #include "../qat_logs.h"
59 #include <openssl/sha.h> /* Needed to calculate pre-compute values */
60 #include <openssl/aes.h> /* Needed to calculate pre-compute values */
64 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
65 * This is digest size rounded up to nearest quadword
67 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
69 switch (qat_hash_alg) {
70 case ICP_QAT_HW_AUTH_ALGO_SHA1:
71 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
72 QAT_HW_DEFAULT_ALIGNMENT);
73 case ICP_QAT_HW_AUTH_ALGO_SHA256:
74 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
75 QAT_HW_DEFAULT_ALIGNMENT);
76 case ICP_QAT_HW_AUTH_ALGO_SHA512:
77 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
78 QAT_HW_DEFAULT_ALIGNMENT);
79 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
80 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
81 QAT_HW_DEFAULT_ALIGNMENT);
82 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
83 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
84 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
85 QAT_HW_DEFAULT_ALIGNMENT);
86 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
87 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
88 QAT_HW_DEFAULT_ALIGNMENT);
89 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
90 /* return maximum state1 size in this case */
91 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
92 QAT_HW_DEFAULT_ALIGNMENT);
94 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
100 /* returns digest size in bytes per hash algo */
101 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
103 switch (qat_hash_alg) {
104 case ICP_QAT_HW_AUTH_ALGO_SHA1:
105 return ICP_QAT_HW_SHA1_STATE1_SZ;
106 case ICP_QAT_HW_AUTH_ALGO_SHA256:
107 return ICP_QAT_HW_SHA256_STATE1_SZ;
108 case ICP_QAT_HW_AUTH_ALGO_SHA512:
109 return ICP_QAT_HW_SHA512_STATE1_SZ;
110 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
111 /* return maximum digest size in this case */
112 return ICP_QAT_HW_SHA512_STATE1_SZ;
114 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
120 /* returns block size in byes per hash algo */
121 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
123 switch (qat_hash_alg) {
124 case ICP_QAT_HW_AUTH_ALGO_SHA1:
126 case ICP_QAT_HW_AUTH_ALGO_SHA256:
127 return SHA256_CBLOCK;
128 case ICP_QAT_HW_AUTH_ALGO_SHA512:
129 return SHA512_CBLOCK;
130 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
132 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
133 /* return maximum block size in this case */
134 return SHA512_CBLOCK;
136 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
142 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
146 if (!SHA1_Init(&ctx))
148 SHA1_Transform(&ctx, data_in);
149 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
153 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
157 if (!SHA256_Init(&ctx))
159 SHA256_Transform(&ctx, data_in);
160 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
164 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
168 if (!SHA512_Init(&ctx))
170 SHA512_Transform(&ctx, data_in);
171 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
175 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
180 uint8_t digest[qat_hash_get_digest_size(
181 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
182 uint32_t *hash_state_out_be32;
183 uint64_t *hash_state_out_be64;
186 PMD_INIT_FUNC_TRACE();
187 digest_size = qat_hash_get_digest_size(hash_alg);
188 if (digest_size <= 0)
191 hash_state_out_be32 = (uint32_t *)data_out;
192 hash_state_out_be64 = (uint64_t *)data_out;
195 case ICP_QAT_HW_AUTH_ALGO_SHA1:
196 if (partial_hash_sha1(data_in, digest))
198 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
199 *hash_state_out_be32 =
200 rte_bswap32(*(((uint32_t *)digest)+i));
202 case ICP_QAT_HW_AUTH_ALGO_SHA256:
203 if (partial_hash_sha256(data_in, digest))
205 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
206 *hash_state_out_be32 =
207 rte_bswap32(*(((uint32_t *)digest)+i));
209 case ICP_QAT_HW_AUTH_ALGO_SHA512:
210 if (partial_hash_sha512(data_in, digest))
212 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
213 *hash_state_out_be64 =
214 rte_bswap64(*(((uint64_t *)digest)+i));
217 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
/* HMAC inner/outer pad bytes XORed over the key block (RFC 2104 ipad/opad) */
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
/* Number of derived keys (K1..K3) generated for the AES-XCBC-MAC precompute */
#define HASH_XCBC_PRECOMP_KEY_NUM 3
227 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
228 const uint8_t *auth_key,
229 uint16_t auth_keylen,
230 uint8_t *p_state_buf,
231 uint16_t *p_state_len)
234 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
235 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
238 PMD_INIT_FUNC_TRACE();
239 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
240 static uint8_t qat_aes_xcbc_key_seed[
241 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
242 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
243 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
244 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
245 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
246 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
247 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
251 uint8_t *out = p_state_buf;
255 in = rte_zmalloc("working mem for key",
256 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
257 rte_memcpy(in, qat_aes_xcbc_key_seed,
258 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
259 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
260 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
263 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
265 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
266 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
269 AES_encrypt(in, out, &enc_key);
270 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
271 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
273 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
274 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
276 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
277 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
279 uint8_t *out = p_state_buf;
282 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
283 ICP_QAT_HW_GALOIS_LEN_A_SZ +
284 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
285 in = rte_zmalloc("working mem for key",
286 ICP_QAT_HW_GALOIS_H_SZ, 16);
287 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
288 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
292 AES_encrypt(in, out, &enc_key);
293 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
294 ICP_QAT_HW_GALOIS_LEN_A_SZ +
295 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
300 block_size = qat_hash_get_block_size(hash_alg);
303 /* init ipad and opad from key and xor with fixed values */
304 memset(ipad, 0, block_size);
305 memset(opad, 0, block_size);
307 if (auth_keylen > (unsigned int)block_size) {
308 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
311 rte_memcpy(ipad, auth_key, auth_keylen);
312 rte_memcpy(opad, auth_key, auth_keylen);
314 for (i = 0; i < block_size; i++) {
315 uint8_t *ipad_ptr = ipad + i;
316 uint8_t *opad_ptr = opad + i;
317 *ipad_ptr ^= HMAC_IPAD_VALUE;
318 *opad_ptr ^= HMAC_OPAD_VALUE;
321 /* do partial hash of ipad and copy to state1 */
322 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
323 memset(ipad, 0, block_size);
324 memset(opad, 0, block_size);
325 PMD_DRV_LOG(ERR, "ipad precompute failed");
330 * State len is a multiple of 8, so may be larger than the digest.
331 * Put the partial hash of opad state_len bytes after state1
333 *p_state_len = qat_hash_get_state1_size(hash_alg);
334 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
335 memset(ipad, 0, block_size);
336 memset(opad, 0, block_size);
337 PMD_DRV_LOG(ERR, "opad precompute failed");
341 /* don't leave data lying around */
342 memset(ipad, 0, block_size);
343 memset(opad, 0, block_size);
347 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
349 PMD_INIT_FUNC_TRACE();
351 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
352 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
353 header->comn_req_flags =
354 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
355 QAT_COMN_PTR_TYPE_FLAT);
356 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
357 ICP_QAT_FW_LA_PARTIAL_NONE);
358 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
359 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
360 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
361 ICP_QAT_FW_LA_NO_PROTO);
362 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
363 ICP_QAT_FW_LA_NO_UPDATE_STATE);
/*
 * Build the cipher half of the session content descriptor and the cipher
 * portion of the firmware request template.
 * NOTE(review): this extract is missing lines (the cipher key parameter,
 * several braces/returns and else-branches are truncated) -- the comments
 * below annotate only the code that is visible here.
 */
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
uint32_t cipherkeylen)
/* views into the firmware request template embedded in the session */
struct icp_qat_hw_cipher_algo_blk *cipher;
struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
/* cd_ctrl is viewed as either cipher or auth ctrl header below */
void *ptr = &req_tmpl->cd_ctrl;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
uint16_t cipher_offset = 0;
PMD_INIT_FUNC_TRACE();
/*
 * For HASH_CIPHER (non-Snow3G) the auth block comes first in the CD,
 * so the cipher block sits after it; otherwise cipher is at offset 0.
 */
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
(struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
sizeof(struct icp_qat_hw_auth_algo_blk));
cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
/* encrypt: return auth result; decrypt: compare auth result */
if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_CMP_AUTH_RES);
if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
/* CTR Streaming ciphers are a special case. Decrypt = encrypt
* Overriding default values previously set
cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
/* Snow3G UIA2 hash always needs the key converted */
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
/* For Snow3G, set key convert and other bits */
if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
/* program the cipher config word and copy the key into the CD */
cipher->aes.cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
cdesc->qat_cipher_alg, key_convert,
memcpy(cipher->aes.key, cipherkey, cipherkeylen);
proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
/* Request template setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = cdesc->qat_cmd;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
/* Configure the common header protocol flags */
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
/* descriptor sizes are expressed in quadwords, hence >> 3 */
cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
/* Cipher CD config setup */
if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
/* Snow3G key size includes the appended IV area */
cipher_cd_ctrl->cipher_key_sz =
(ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
/* chain the firmware slices according to the command type */
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_DRAM_WR);
} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_DRAM_WR);
} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_DRAM_WR);
/* any other command type is rejected */
PMD_DRV_LOG(ERR, "invalid param, only authenticated "
"encryption supported");
/*
 * Build the auth half of the session content descriptor: hash setup block,
 * pre-computed HMAC/XCBC/GHASH state, request-template auth flags and the
 * auth CD control fields.
 * NOTE(review): this extract is missing lines (several parameters, braces,
 * returns and break statements are truncated) -- the comments below annotate
 * only the code that is visible here.
 */
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t add_auth_data_length,
unsigned int operation)
/* views into the content descriptor and request template */
struct icp_qat_hw_cipher_algo_blk *cipher;
struct icp_qat_hw_auth_algo_blk *hash;
struct icp_qat_hw_cipher_algo_blk *cipherconfig;
struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* auth request params live right after the cipher request params */
struct icp_qat_fw_la_auth_req_params *auth_param =
(struct icp_qat_fw_la_auth_req_params *)
((char *)&req_tmpl->serv_specif_rqpars +
sizeof(struct icp_qat_fw_la_cipher_req_params));
enum icp_qat_hw_cipher_convert key_convert;
uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
uint16_t state1_size = 0;
uint16_t state2_size = 0;
uint16_t cipher_offset = 0, hash_offset = 0;
PMD_INIT_FUNC_TRACE();
/* block ordering in the CD depends on the command (hash-first or not) */
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
(struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
sizeof(struct icp_qat_hw_auth_algo_blk));
cipher_offset = ((char *)hash - (char *)cipher);
cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
sizeof(struct icp_qat_hw_cipher_algo_blk));
hash_offset = ((char *)hash - (char *)cipher);
/* encrypt: return auth result; decrypt: compare auth result */
if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_CMP_AUTH_RES);
if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
/* CTR Streaming ciphers are a special case. Decrypt = encrypt
* Overriding default values previously set
cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
/* program the cipher config word */
cipher->aes.cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
cdesc->qat_cipher_alg, key_convert,
/* inner hash setup: mode 1, algo, digest size; counter = block size */
hash->sha.inner_setup.auth_config.reserved = 0;
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
cdesc->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
/* Snow3G UIA2: no counter; embed an ECB cipher config + key */
hash->sha.inner_setup.auth_counter.counter = 0;
hash->sha.outer_setup.auth_config.reserved = 0;
cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
((char *)&cdesc->cd +
sizeof(struct icp_qat_hw_auth_algo_blk)
cipherconfig->aes.cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
ICP_QAT_HW_CIPHER_KEY_CONVERT,
ICP_QAT_HW_CIPHER_ENCRYPT);
memcpy(cipherconfig->aes.key, authkey, authkeylen);
memset(cipherconfig->aes.key + authkeylen, 0,
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
/* per-algorithm pre-compute of state1/state2 */
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
} else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
(cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
PMD_DRV_LOG(ERR, "(GCM)precompute failed");
* Write (the length of AAD) into bytes 16-19 of state2
* in big-endian format. This field is 8 bytes
uint32_t *aad_len = (uint32_t *)&hash->sha.state1[
ICP_QAT_HW_GALOIS_128_STATE1_SZ +
ICP_QAT_HW_GALOIS_H_SZ];
*aad_len = rte_bswap32(add_auth_data_length);
proto = ICP_QAT_FW_LA_GCM_PROTO;
} else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
/* default: HMAC-SHA* ipad/opad precompute into state1 */
if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, (uint8_t *)(hash->sha.state1),
PMD_DRV_LOG(ERR, "(SHA)precompute failed");
/* Request template setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = cdesc->qat_cmd;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
/* Configure the common header protocol flags */
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
/* descriptor sizes are expressed in quadwords, hence >> 3 */
cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
/* auth-only command: digest out of buffer, 64-bit IV pointer */
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
/* verify operation: firmware compares the digest instead */
if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_CMP_AUTH_RES);
/* Cipher CD config setup */
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_AUTH) {
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset>>3;
cipher_cd_ctrl->cipher_state_sz = 0;
cipher_cd_ctrl->cipher_cfg_offset = 0;
/* Auth CD config setup */
hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
hash_cd_ctrl->inner_res_sz = digestsize;
hash_cd_ctrl->final_sz = digestsize;
hash_cd_ctrl->inner_state1_sz = state1_size;
/* per-algorithm inner state sizes (state2 holds precompute data) */
switch (cdesc->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
hash_cd_ctrl->inner_state2_sz =
RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
case ICP_QAT_HW_AUTH_ALGO_SHA256:
hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
hash_cd_ctrl->inner_state2_sz =
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
hash_cd_ctrl->inner_state1_sz =
ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
ICP_QAT_HW_GALOIS_LEN_A_SZ +
ICP_QAT_HW_GALOIS_E_CTR0_SZ;
hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
hash_cd_ctrl->inner_state2_sz =
ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
hash_cd_ctrl->inner_state1_sz =
ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ;
memset(hash->sha.state1, 0, ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ);
PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
/* state2 follows the auth setup plus 8-byte-aligned state1 */
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
auth_param->auth_res_sz = digestsize;
/* chain the firmware slices according to the command type */
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_DRAM_WR);
} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_DRAM_WR);
} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
ICP_QAT_FW_SLICE_DRAM_WR);
/* any other command type is rejected */
PMD_DRV_LOG(ERR, "invalid param, only authenticated "
"encryption supported");
760 static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
761 struct icp_qat_hw_cipher_algo_blk *cd,
762 const uint8_t *key, unsigned int keylen)
764 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
765 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
766 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
768 PMD_INIT_FUNC_TRACE();
769 rte_memcpy(cd->aes.key, key, keylen);
770 qat_alg_init_common_hdr(header);
771 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
772 cd_pars->u.s.content_desc_params_sz =
773 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
774 /* Cipher CD config setup */
775 cd_ctrl->cipher_key_sz = keylen >> 3;
776 cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
777 cd_ctrl->cipher_cfg_offset = 0;
778 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
779 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
782 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
783 int alg, const uint8_t *key,
786 struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
787 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
788 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
790 PMD_INIT_FUNC_TRACE();
791 qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
792 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
793 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
796 void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
797 int alg, const uint8_t *key,
800 struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
801 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
802 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
804 PMD_INIT_FUNC_TRACE();
805 qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
806 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
807 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
810 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
813 case ICP_QAT_HW_AES_128_KEY_SZ:
814 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
816 case ICP_QAT_HW_AES_192_KEY_SZ:
817 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
819 case ICP_QAT_HW_AES_256_KEY_SZ:
820 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
828 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
831 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
832 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;