2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
6 * Copyright(c) 2015 Intel Corporation.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * Contact Information:
20 * Copyright(c) 2015 Intel Corporation.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "../qat_logs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
63 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
64 * This is digest size rounded up to nearest quadword
66 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
68 switch (qat_hash_alg) {
69 case ICP_QAT_HW_AUTH_ALGO_SHA1:
70 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
71 QAT_HW_DEFAULT_ALIGNMENT);
72 case ICP_QAT_HW_AUTH_ALGO_SHA256:
73 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
74 QAT_HW_DEFAULT_ALIGNMENT);
75 case ICP_QAT_HW_AUTH_ALGO_SHA512:
76 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
77 QAT_HW_DEFAULT_ALIGNMENT);
78 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
79 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
80 QAT_HW_DEFAULT_ALIGNMENT);
81 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
82 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
83 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
84 QAT_HW_DEFAULT_ALIGNMENT);
85 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
86 /* return maximum state1 size in this case */
87 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
88 QAT_HW_DEFAULT_ALIGNMENT);
90 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
96 /* returns digest size in bytes per hash algo */
97 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
99 switch (qat_hash_alg) {
100 case ICP_QAT_HW_AUTH_ALGO_SHA1:
101 return ICP_QAT_HW_SHA1_STATE1_SZ;
102 case ICP_QAT_HW_AUTH_ALGO_SHA256:
103 return ICP_QAT_HW_SHA256_STATE1_SZ;
104 case ICP_QAT_HW_AUTH_ALGO_SHA512:
105 return ICP_QAT_HW_SHA512_STATE1_SZ;
106 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
107 /* return maximum digest size in this case */
108 return ICP_QAT_HW_SHA512_STATE1_SZ;
110 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
116 /* returns block size in byes per hash algo */
117 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
119 switch (qat_hash_alg) {
120 case ICP_QAT_HW_AUTH_ALGO_SHA1:
122 case ICP_QAT_HW_AUTH_ALGO_SHA256:
123 return SHA256_CBLOCK;
124 case ICP_QAT_HW_AUTH_ALGO_SHA512:
125 return SHA512_CBLOCK;
126 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
128 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
129 /* return maximum block size in this case */
130 return SHA512_CBLOCK;
132 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
138 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
142 if (!SHA1_Init(&ctx))
144 SHA1_Transform(&ctx, data_in);
145 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
149 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
153 if (!SHA256_Init(&ctx))
155 SHA256_Transform(&ctx, data_in);
156 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
160 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
164 if (!SHA512_Init(&ctx))
166 SHA512_Transform(&ctx, data_in);
167 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
171 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
176 uint8_t digest[qat_hash_get_digest_size(
177 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
178 uint32_t *hash_state_out_be32;
179 uint64_t *hash_state_out_be64;
182 PMD_INIT_FUNC_TRACE();
183 digest_size = qat_hash_get_digest_size(hash_alg);
184 if (digest_size <= 0)
187 hash_state_out_be32 = (uint32_t *)data_out;
188 hash_state_out_be64 = (uint64_t *)data_out;
191 case ICP_QAT_HW_AUTH_ALGO_SHA1:
192 if (partial_hash_sha1(data_in, digest))
194 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
195 *hash_state_out_be32 =
196 rte_bswap32(*(((uint32_t *)digest)+i));
198 case ICP_QAT_HW_AUTH_ALGO_SHA256:
199 if (partial_hash_sha256(data_in, digest))
201 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
202 *hash_state_out_be32 =
203 rte_bswap32(*(((uint32_t *)digest)+i));
205 case ICP_QAT_HW_AUTH_ALGO_SHA512:
206 if (partial_hash_sha512(data_in, digest))
208 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
209 *hash_state_out_be64 =
210 rte_bswap64(*(((uint64_t *)digest)+i));
213 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
219 #define HMAC_IPAD_VALUE 0x36
220 #define HMAC_OPAD_VALUE 0x5c
221 #define HASH_XCBC_PRECOMP_KEY_NUM 3
223 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
224 const uint8_t *auth_key,
225 uint16_t auth_keylen,
226 uint8_t *p_state_buf,
227 uint16_t *p_state_len)
230 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
231 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
234 PMD_INIT_FUNC_TRACE();
235 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
236 static uint8_t qat_aes_xcbc_key_seed[
237 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
238 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
239 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
240 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
241 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
242 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
243 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
247 uint8_t *out = p_state_buf;
251 in = rte_zmalloc("working mem for key",
252 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
253 rte_memcpy(in, qat_aes_xcbc_key_seed,
254 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
255 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
256 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
259 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
261 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
262 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
265 AES_encrypt(in, out, &enc_key);
266 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
267 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
269 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
270 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
272 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
273 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
275 uint8_t *out = p_state_buf;
278 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
279 ICP_QAT_HW_GALOIS_LEN_A_SZ +
280 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
281 in = rte_zmalloc("working mem for key",
282 ICP_QAT_HW_GALOIS_H_SZ, 16);
283 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
284 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
288 AES_encrypt(in, out, &enc_key);
289 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
290 ICP_QAT_HW_GALOIS_LEN_A_SZ +
291 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
296 block_size = qat_hash_get_block_size(hash_alg);
299 /* init ipad and opad from key and xor with fixed values */
300 memset(ipad, 0, block_size);
301 memset(opad, 0, block_size);
303 if (auth_keylen > (unsigned int)block_size) {
304 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
307 rte_memcpy(ipad, auth_key, auth_keylen);
308 rte_memcpy(opad, auth_key, auth_keylen);
310 for (i = 0; i < block_size; i++) {
311 uint8_t *ipad_ptr = ipad + i;
312 uint8_t *opad_ptr = opad + i;
313 *ipad_ptr ^= HMAC_IPAD_VALUE;
314 *opad_ptr ^= HMAC_OPAD_VALUE;
317 /* do partial hash of ipad and copy to state1 */
318 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
319 memset(ipad, 0, block_size);
320 memset(opad, 0, block_size);
321 PMD_DRV_LOG(ERR, "ipad precompute failed");
326 * State len is a multiple of 8, so may be larger than the digest.
327 * Put the partial hash of opad state_len bytes after state1
329 *p_state_len = qat_hash_get_state1_size(hash_alg);
330 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
331 memset(ipad, 0, block_size);
332 memset(opad, 0, block_size);
333 PMD_DRV_LOG(ERR, "opad precompute failed");
337 /* don't leave data lying around */
338 memset(ipad, 0, block_size);
339 memset(opad, 0, block_size);
343 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
345 PMD_INIT_FUNC_TRACE();
347 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
348 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
349 header->comn_req_flags =
350 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
351 QAT_COMN_PTR_TYPE_FLAT);
352 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
353 ICP_QAT_FW_LA_PARTIAL_NONE);
354 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
355 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
356 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
357 ICP_QAT_FW_LA_NO_PROTO);
358 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
359 ICP_QAT_FW_LA_NO_UPDATE_STATE);
362 int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
363 uint8_t *cipherkey, uint32_t cipherkeylen,
364 uint8_t *authkey, uint32_t authkeylen,
365 uint32_t add_auth_data_length,
368 struct qat_alg_cd *content_desc = &cdesc->cd;
369 struct icp_qat_hw_cipher_algo_blk *cipher = &content_desc->cipher;
370 struct icp_qat_hw_auth_algo_blk *hash = &content_desc->hash;
371 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
372 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
373 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
374 void *ptr = &req_tmpl->cd_ctrl;
375 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
376 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
377 struct icp_qat_fw_la_auth_req_params *auth_param =
378 (struct icp_qat_fw_la_auth_req_params *)
379 ((char *)&req_tmpl->serv_specif_rqpars +
380 sizeof(struct icp_qat_fw_la_cipher_req_params));
381 enum icp_qat_hw_cipher_convert key_convert;
382 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
383 uint16_t state1_size = 0;
384 uint16_t state2_size = 0;
386 PMD_INIT_FUNC_TRACE();
389 if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
390 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
391 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
392 ICP_QAT_FW_LA_RET_AUTH_RES);
393 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
394 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
396 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
397 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
398 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
399 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
400 ICP_QAT_FW_LA_CMP_AUTH_RES);
403 cipher->aes.cipher_config.val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
404 cdesc->qat_mode, cdesc->qat_cipher_alg, key_convert,
406 memcpy(cipher->aes.key, cipherkey, cipherkeylen);
408 hash->sha.inner_setup.auth_config.reserved = 0;
409 hash->sha.inner_setup.auth_config.config =
410 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
411 cdesc->qat_hash_alg, digestsize);
412 hash->sha.inner_setup.auth_counter.counter =
413 rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
416 if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
417 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
418 authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
419 ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
420 PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
423 } else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
424 (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
425 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
426 cipherkey, cipherkeylen, (uint8_t *)(hash->sha.state1 +
427 ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
428 PMD_DRV_LOG(ERR, "(GCM)precompute failed");
432 * Write (the length of AAD) into bytes 16-19 of state2
433 * in big-endian format. This field is 8 bytes
435 *(uint32_t *)&(hash->sha.state1[
436 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
437 ICP_QAT_HW_GALOIS_H_SZ]) =
438 rte_bswap32(add_auth_data_length);
439 proto = ICP_QAT_FW_LA_GCM_PROTO;
441 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
442 authkey, authkeylen, (uint8_t *)(hash->sha.state1),
444 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
449 /* Request template setup */
450 qat_alg_init_common_hdr(header);
451 header->service_cmd_id = cdesc->qat_cmd;
452 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
453 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
454 /* Configure the common header protocol flags */
455 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
456 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
457 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
459 /* Cipher CD config setup */
460 cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
461 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
462 cipher_cd_ctrl->cipher_cfg_offset = 0;
464 /* Auth CD config setup */
465 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
466 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
467 hash_cd_ctrl->inner_res_sz = digestsize;
468 hash_cd_ctrl->final_sz = digestsize;
469 hash_cd_ctrl->inner_state1_sz = state1_size;
471 switch (cdesc->qat_hash_alg) {
472 case ICP_QAT_HW_AUTH_ALGO_SHA1:
473 hash_cd_ctrl->inner_state2_sz =
474 RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
476 case ICP_QAT_HW_AUTH_ALGO_SHA256:
477 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
479 case ICP_QAT_HW_AUTH_ALGO_SHA512:
480 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
482 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
483 hash_cd_ctrl->inner_state2_sz =
484 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
485 hash_cd_ctrl->inner_state1_sz =
486 ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
487 memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
489 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
490 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
491 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
492 ICP_QAT_HW_GALOIS_LEN_A_SZ +
493 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
494 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
495 memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
498 PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
502 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
503 ((sizeof(struct icp_qat_hw_auth_setup) +
504 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
506 auth_param->auth_res_sz = digestsize;
509 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
510 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
511 ICP_QAT_FW_SLICE_CIPHER);
512 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
513 ICP_QAT_FW_SLICE_AUTH);
514 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
515 ICP_QAT_FW_SLICE_AUTH);
516 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
517 ICP_QAT_FW_SLICE_DRAM_WR);
518 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
519 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
520 ICP_QAT_FW_SLICE_AUTH);
521 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
522 ICP_QAT_FW_SLICE_CIPHER);
523 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
524 ICP_QAT_FW_SLICE_CIPHER);
525 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
526 ICP_QAT_FW_SLICE_DRAM_WR);
528 PMD_DRV_LOG(ERR, "invalid param, only authenticated "
529 "encryption supported");
535 static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
536 struct icp_qat_hw_cipher_algo_blk *cd,
537 const uint8_t *key, unsigned int keylen)
539 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
540 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
541 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
543 PMD_INIT_FUNC_TRACE();
544 rte_memcpy(cd->aes.key, key, keylen);
545 qat_alg_init_common_hdr(header);
546 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
547 cd_pars->u.s.content_desc_params_sz =
548 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
549 /* Cipher CD config setup */
550 cd_ctrl->cipher_key_sz = keylen >> 3;
551 cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
552 cd_ctrl->cipher_cfg_offset = 0;
553 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
554 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
557 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
558 int alg, const uint8_t *key,
561 struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
562 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
563 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
565 PMD_INIT_FUNC_TRACE();
566 qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
567 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
568 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
571 void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
572 int alg, const uint8_t *key,
575 struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
576 struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
577 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
579 PMD_INIT_FUNC_TRACE();
580 qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
581 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
582 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
585 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
588 case ICP_QAT_HW_AES_128_KEY_SZ:
589 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
591 case ICP_QAT_HW_AES_192_KEY_SZ:
592 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
594 case ICP_QAT_HW_AES_256_KEY_SZ:
595 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;