cryptodev: remove unused digest-appended feature
[dpdk.git] / drivers / crypto / qat / qat_adf / qat_algs_build_desc.c
1 /*
2  *  This file is provided under a dual BSD/GPLv2 license.  When using or
3  *  redistributing this file, you may do so under either license.
4  *
5  *  GPL LICENSE SUMMARY
6  *  Copyright(c) 2015-2016 Intel Corporation.
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of version 2 of the GNU General Public License as
9  *  published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope that it will be useful, but
12  *  WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  General Public License for more details.
15  *
16  *  Contact Information:
17  *  qat-linux@intel.com
18  *
19  *  BSD LICENSE
20  *  Copyright(c) 2015-2016 Intel Corporation.
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *      * Redistributions of source code must retain the above copyright
26  *        notice, this list of conditions and the following disclaimer.
27  *      * Redistributions in binary form must reproduce the above copyright
28  *        notice, this list of conditions and the following disclaimer in
29  *        the documentation and/or other materials provided with the
30  *        distribution.
31  *      * Neither the name of Intel Corporation nor the names of its
32  *        contributors may be used to endorse or promote products derived
33  *        from this software without specific prior written permission.
34  *
35  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46  */
47
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
52 #include <rte_log.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
55
56 #include "../qat_logs.h"
57 #include "qat_algs.h"
58
59 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
60 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
61 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
62
63
64 /*
65  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
66  * This is digest size rounded up to nearest quadword
67  */
68 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
69 {
70         switch (qat_hash_alg) {
71         case ICP_QAT_HW_AUTH_ALGO_SHA1:
72                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
73                                                 QAT_HW_DEFAULT_ALIGNMENT);
74         case ICP_QAT_HW_AUTH_ALGO_SHA224:
75                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
76                                                 QAT_HW_DEFAULT_ALIGNMENT);
77         case ICP_QAT_HW_AUTH_ALGO_SHA256:
78                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
79                                                 QAT_HW_DEFAULT_ALIGNMENT);
80         case ICP_QAT_HW_AUTH_ALGO_SHA384:
81                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
82                                                 QAT_HW_DEFAULT_ALIGNMENT);
83         case ICP_QAT_HW_AUTH_ALGO_SHA512:
84                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
85                                                 QAT_HW_DEFAULT_ALIGNMENT);
86         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
87                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
88                                                 QAT_HW_DEFAULT_ALIGNMENT);
89         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
90         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
91                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
92                                                 QAT_HW_DEFAULT_ALIGNMENT);
93         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
94                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
95                                                 QAT_HW_DEFAULT_ALIGNMENT);
96         case ICP_QAT_HW_AUTH_ALGO_MD5:
97                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
98                                                 QAT_HW_DEFAULT_ALIGNMENT);
99         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
100                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
101                                                 QAT_HW_DEFAULT_ALIGNMENT);
102         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
103                 /* return maximum state1 size in this case */
104                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
105                                                 QAT_HW_DEFAULT_ALIGNMENT);
106         default:
107                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
108                 return -EFAULT;
109         };
110         return -EFAULT;
111 }
112
113 /* returns digest size in bytes  per hash algo */
114 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
115 {
116         switch (qat_hash_alg) {
117         case ICP_QAT_HW_AUTH_ALGO_SHA1:
118                 return ICP_QAT_HW_SHA1_STATE1_SZ;
119         case ICP_QAT_HW_AUTH_ALGO_SHA224:
120                 return ICP_QAT_HW_SHA224_STATE1_SZ;
121         case ICP_QAT_HW_AUTH_ALGO_SHA256:
122                 return ICP_QAT_HW_SHA256_STATE1_SZ;
123         case ICP_QAT_HW_AUTH_ALGO_SHA384:
124                 return ICP_QAT_HW_SHA384_STATE1_SZ;
125         case ICP_QAT_HW_AUTH_ALGO_SHA512:
126                 return ICP_QAT_HW_SHA512_STATE1_SZ;
127         case ICP_QAT_HW_AUTH_ALGO_MD5:
128                 return ICP_QAT_HW_MD5_STATE1_SZ;
129         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
130                 /* return maximum digest size in this case */
131                 return ICP_QAT_HW_SHA512_STATE1_SZ;
132         default:
133                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
134                 return -EFAULT;
135         };
136         return -EFAULT;
137 }
138
139 /* returns block size in byes per hash algo */
140 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
141 {
142         switch (qat_hash_alg) {
143         case ICP_QAT_HW_AUTH_ALGO_SHA1:
144                 return SHA_CBLOCK;
145         case ICP_QAT_HW_AUTH_ALGO_SHA224:
146                 return SHA256_CBLOCK;
147         case ICP_QAT_HW_AUTH_ALGO_SHA256:
148                 return SHA256_CBLOCK;
149         case ICP_QAT_HW_AUTH_ALGO_SHA384:
150                 return SHA512_CBLOCK;
151         case ICP_QAT_HW_AUTH_ALGO_SHA512:
152                 return SHA512_CBLOCK;
153         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
154                 return 16;
155         case ICP_QAT_HW_AUTH_ALGO_MD5:
156                 return MD5_CBLOCK;
157         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
158                 /* return maximum block size in this case */
159                 return SHA512_CBLOCK;
160         default:
161                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
162                 return -EFAULT;
163         };
164         return -EFAULT;
165 }
166
167 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
168 {
169         SHA_CTX ctx;
170
171         if (!SHA1_Init(&ctx))
172                 return -EFAULT;
173         SHA1_Transform(&ctx, data_in);
174         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
175         return 0;
176 }
177
178 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
179 {
180         SHA256_CTX ctx;
181
182         if (!SHA224_Init(&ctx))
183                 return -EFAULT;
184         SHA256_Transform(&ctx, data_in);
185         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
186         return 0;
187 }
188
189 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
190 {
191         SHA256_CTX ctx;
192
193         if (!SHA256_Init(&ctx))
194                 return -EFAULT;
195         SHA256_Transform(&ctx, data_in);
196         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
197         return 0;
198 }
199
200 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
201 {
202         SHA512_CTX ctx;
203
204         if (!SHA384_Init(&ctx))
205                 return -EFAULT;
206         SHA512_Transform(&ctx, data_in);
207         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
208         return 0;
209 }
210
211 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
212 {
213         SHA512_CTX ctx;
214
215         if (!SHA512_Init(&ctx))
216                 return -EFAULT;
217         SHA512_Transform(&ctx, data_in);
218         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
219         return 0;
220 }
221
222 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
223 {
224         MD5_CTX ctx;
225
226         if (!MD5_Init(&ctx))
227                 return -EFAULT;
228         MD5_Transform(&ctx, data_in);
229         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
230
231         return 0;
232 }
233
234 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
235                         uint8_t *data_in,
236                         uint8_t *data_out)
237 {
238         int digest_size;
239         uint8_t digest[qat_hash_get_digest_size(
240                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
241         uint32_t *hash_state_out_be32;
242         uint64_t *hash_state_out_be64;
243         int i;
244
245         PMD_INIT_FUNC_TRACE();
246         digest_size = qat_hash_get_digest_size(hash_alg);
247         if (digest_size <= 0)
248                 return -EFAULT;
249
250         hash_state_out_be32 = (uint32_t *)data_out;
251         hash_state_out_be64 = (uint64_t *)data_out;
252
253         switch (hash_alg) {
254         case ICP_QAT_HW_AUTH_ALGO_SHA1:
255                 if (partial_hash_sha1(data_in, digest))
256                         return -EFAULT;
257                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
258                         *hash_state_out_be32 =
259                                 rte_bswap32(*(((uint32_t *)digest)+i));
260                 break;
261         case ICP_QAT_HW_AUTH_ALGO_SHA224:
262                 if (partial_hash_sha224(data_in, digest))
263                         return -EFAULT;
264                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
265                         *hash_state_out_be32 =
266                                 rte_bswap32(*(((uint32_t *)digest)+i));
267                 break;
268         case ICP_QAT_HW_AUTH_ALGO_SHA256:
269                 if (partial_hash_sha256(data_in, digest))
270                         return -EFAULT;
271                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
272                         *hash_state_out_be32 =
273                                 rte_bswap32(*(((uint32_t *)digest)+i));
274                 break;
275         case ICP_QAT_HW_AUTH_ALGO_SHA384:
276                 if (partial_hash_sha384(data_in, digest))
277                         return -EFAULT;
278                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
279                         *hash_state_out_be64 =
280                                 rte_bswap64(*(((uint64_t *)digest)+i));
281                 break;
282         case ICP_QAT_HW_AUTH_ALGO_SHA512:
283                 if (partial_hash_sha512(data_in, digest))
284                         return -EFAULT;
285                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
286                         *hash_state_out_be64 =
287                                 rte_bswap64(*(((uint64_t *)digest)+i));
288                 break;
289         case ICP_QAT_HW_AUTH_ALGO_MD5:
290                 if (partial_hash_md5(data_in, data_out))
291                         return -EFAULT;
292                 break;
293         default:
294                 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
295                 return -EFAULT;
296         }
297
298         return 0;
299 }
300 #define HMAC_IPAD_VALUE 0x36
301 #define HMAC_OPAD_VALUE 0x5c
302 #define HASH_XCBC_PRECOMP_KEY_NUM 3
303
304 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
305                                 const uint8_t *auth_key,
306                                 uint16_t auth_keylen,
307                                 uint8_t *p_state_buf,
308                                 uint16_t *p_state_len)
309 {
310         int block_size;
311         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
312         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
313         int i;
314
315         PMD_INIT_FUNC_TRACE();
316         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
317                 static uint8_t qat_aes_xcbc_key_seed[
318                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
319                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
320                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
321                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
322                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
323                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
324                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
325                 };
326
327                 uint8_t *in = NULL;
328                 uint8_t *out = p_state_buf;
329                 int x;
330                 AES_KEY enc_key;
331
332                 in = rte_zmalloc("working mem for key",
333                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
334                 rte_memcpy(in, qat_aes_xcbc_key_seed,
335                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
336                 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
337                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
338                                 &enc_key) != 0) {
339                                 rte_free(in -
340                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
341                                 memset(out -
342                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
343                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
344                                 return -EFAULT;
345                         }
346                         AES_encrypt(in, out, &enc_key);
347                         in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
348                         out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
349                 }
350                 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
351                 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
352                 return 0;
353         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
354                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
355                 uint8_t *in = NULL;
356                 uint8_t *out = p_state_buf;
357                 AES_KEY enc_key;
358
359                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
360                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
361                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
362                 in = rte_zmalloc("working mem for key",
363                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
364                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
365                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
366                         &enc_key) != 0) {
367                         return -EFAULT;
368                 }
369                 AES_encrypt(in, out, &enc_key);
370                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
371                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
372                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
373                 rte_free(in);
374                 return 0;
375         }
376
377         block_size = qat_hash_get_block_size(hash_alg);
378         if (block_size <= 0)
379                 return -EFAULT;
380         /* init ipad and opad from key and xor with fixed values */
381         memset(ipad, 0, block_size);
382         memset(opad, 0, block_size);
383
384         if (auth_keylen > (unsigned int)block_size) {
385                 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
386                 return -EFAULT;
387         }
388         rte_memcpy(ipad, auth_key, auth_keylen);
389         rte_memcpy(opad, auth_key, auth_keylen);
390
391         for (i = 0; i < block_size; i++) {
392                 uint8_t *ipad_ptr = ipad + i;
393                 uint8_t *opad_ptr = opad + i;
394                 *ipad_ptr ^= HMAC_IPAD_VALUE;
395                 *opad_ptr ^= HMAC_OPAD_VALUE;
396         }
397
398         /* do partial hash of ipad and copy to state1 */
399         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
400                 memset(ipad, 0, block_size);
401                 memset(opad, 0, block_size);
402                 PMD_DRV_LOG(ERR, "ipad precompute failed");
403                 return -EFAULT;
404         }
405
406         /*
407          * State len is a multiple of 8, so may be larger than the digest.
408          * Put the partial hash of opad state_len bytes after state1
409          */
410         *p_state_len = qat_hash_get_state1_size(hash_alg);
411         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
412                 memset(ipad, 0, block_size);
413                 memset(opad, 0, block_size);
414                 PMD_DRV_LOG(ERR, "opad precompute failed");
415                 return -EFAULT;
416         }
417
418         /*  don't leave data lying around */
419         memset(ipad, 0, block_size);
420         memset(opad, 0, block_size);
421         return 0;
422 }
423
/*
 * Initialise the common request header shared by all QAT LA requests:
 * service type, flat-buffer pointer mode, and the default lookaside
 * service flags. 'proto' selects the wireless/AEAD protocol bits
 * (e.g. SNOW 3G) already computed by the caller.
 */
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
		uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	/* content descriptor passed by 64-bit address, flat (non-SGL) data */
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	/* whole-message operation: no partial processing or state carry-over */
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				proto);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
	/* digest is returned out-of-band, not appended to the data buffer */
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}
445
/*
 * Build the cipher portion of the QAT content descriptor for a session.
 * Writes the cipher config + key material at cdesc->cd_cur_ptr and
 * fills in the cipher cd_ctrl slice chaining for CIPHER and
 * CIPHER_HASH commands. For HASH_CIPHER commands the slice chaining
 * and cd_cur_ptr are set up by the auth counterpart instead.
 * Returns 0 on success, -EFAULT on an invalid command type.
 */
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cipher and auth cd_ctrl views alias the same cd_ctrl storage */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;
	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* cipher-only: cipher slice then straight to DRAM write */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		/* no auth result to return or compare for pure cipher */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* cipher-then-hash: chain cipher -> auth -> DRAM write */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}
	/* HASH_CIPHER falls through: descriptor layout set by auth side */

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* per-algorithm key/state sizing; cd_ctrl sizes are in quadwords */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		/* keep whatever proto the auth setup may have stored */
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	/* write cipher config block + key at the current descriptor offset */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;

	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 needs the key followed by the key XORed with
		 * the F8 key modifier, at 4-byte granularity
		 */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	/* pad descriptor out to the hardware-expected key size */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		else
			memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	/* descriptor size is reported to firmware in quadwords */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
575
576 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
577                                                 uint8_t *authkey,
578                                                 uint32_t authkeylen,
579                                                 uint32_t add_auth_data_length,
580                                                 uint32_t digestsize,
581                                                 unsigned int operation)
582 {
583         struct icp_qat_hw_auth_setup *hash;
584         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
585         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
586         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
587         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
588         void *ptr = &req_tmpl->cd_ctrl;
589         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
590         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
591         struct icp_qat_fw_la_auth_req_params *auth_param =
592                 (struct icp_qat_fw_la_auth_req_params *)
593                 ((char *)&req_tmpl->serv_specif_rqpars +
594                 sizeof(struct icp_qat_fw_la_cipher_req_params));
595         uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/SNOW 3G */
596         uint16_t state1_size = 0, state2_size = 0;
597         uint16_t hash_offset, cd_size;
598         uint32_t *aad_len = NULL;
599         uint32_t wordIndex  = 0;
600         uint32_t *pTempKey;
601
602         PMD_INIT_FUNC_TRACE();
603
604         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
605                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
606                                         ICP_QAT_FW_SLICE_AUTH);
607                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
608                                         ICP_QAT_FW_SLICE_DRAM_WR);
609                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
610         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
611                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
612                                 ICP_QAT_FW_SLICE_AUTH);
613                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
614                                 ICP_QAT_FW_SLICE_CIPHER);
615                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
616                                 ICP_QAT_FW_SLICE_CIPHER);
617                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
618                                 ICP_QAT_FW_SLICE_DRAM_WR);
619                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
620         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
621                 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
622                 return -EFAULT;
623         }
624
625         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
626                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
627                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
628                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
629                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
630                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
631         } else {
632                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
633                                            ICP_QAT_FW_LA_RET_AUTH_RES);
634                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
635                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
636                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
637         }
638
639         /*
640          * Setup the inner hash config
641          */
642         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
643         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
644         hash->auth_config.reserved = 0;
645         hash->auth_config.config =
646                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
647                                 cdesc->qat_hash_alg, digestsize);
648
649         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
650                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
651                 hash->auth_counter.counter = 0;
652         else
653                 hash->auth_counter.counter = rte_bswap32(
654                                 qat_hash_get_block_size(cdesc->qat_hash_alg));
655
656         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
657
658         /*
659          * cd_cur_ptr now points at the state1 information.
660          */
661         switch (cdesc->qat_hash_alg) {
662         case ICP_QAT_HW_AUTH_ALGO_SHA1:
663                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
664                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
665                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
666                         return -EFAULT;
667                 }
668                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
669                 break;
670         case ICP_QAT_HW_AUTH_ALGO_SHA224:
671                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
672                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
673                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
674                         return -EFAULT;
675                 }
676                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
677                 break;
678         case ICP_QAT_HW_AUTH_ALGO_SHA256:
679                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
680                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
681                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
682                         return -EFAULT;
683                 }
684                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
685                 break;
686         case ICP_QAT_HW_AUTH_ALGO_SHA384:
687                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
688                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
689                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
690                         return -EFAULT;
691                 }
692                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
693                 break;
694         case ICP_QAT_HW_AUTH_ALGO_SHA512:
695                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
696                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
697                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
698                         return -EFAULT;
699                 }
700                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
701                 break;
702         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
703                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
704                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
705                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
706                         &state2_size)) {
707                         PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
708                         return -EFAULT;
709                 }
710                 break;
711         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
712         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
713                 proto = ICP_QAT_FW_LA_GCM_PROTO;
714                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
715                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
716                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
717                         &state2_size)) {
718                         PMD_DRV_LOG(ERR, "(GCM)precompute failed");
719                         return -EFAULT;
720                 }
721                 /*
722                  * Write (the length of AAD) into bytes 16-19 of state2
723                  * in big-endian format. This field is 8 bytes
724                  */
725                 auth_param->u2.aad_sz =
726                                 RTE_ALIGN_CEIL(add_auth_data_length, 16);
727                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
728
729                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
730                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
731                                         ICP_QAT_HW_GALOIS_H_SZ);
732                 *aad_len = rte_bswap32(add_auth_data_length);
733                 break;
734         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
735                 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
736                 state1_size = qat_hash_get_state1_size(
737                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
738                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
739                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
740
741                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
742                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
743                 cipherconfig->cipher_config.val =
744                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
745                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
746                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
747                         ICP_QAT_HW_CIPHER_ENCRYPT);
748                 memcpy(cipherconfig->key, authkey, authkeylen);
749                 memset(cipherconfig->key + authkeylen,
750                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
751                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
752                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
753                 auth_param->hash_state_sz =
754                                 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
755                 break;
756         case ICP_QAT_HW_AUTH_ALGO_MD5:
757                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
758                         authkey, authkeylen, cdesc->cd_cur_ptr,
759                         &state1_size)) {
760                         PMD_DRV_LOG(ERR, "(MD5)precompute failed");
761                         return -EFAULT;
762                 }
763                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
764                 break;
765         case ICP_QAT_HW_AUTH_ALGO_NULL:
766                 break;
767         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
768                 state1_size = qat_hash_get_state1_size(
769                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
770                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
771                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
772                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
773                                                         + authkeylen);
774                 /*
775                 * The Inner Hash Initial State2 block must contain IK
776                 * (Initialisation Key), followed by IK XOR-ed with KM
777                 * (Key Modifier): IK||(IK^KM).
778                 */
779                 /* write the auth key */
780                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
781                 /* initialise temp key with auth key */
782                 memcpy(pTempKey, authkey, authkeylen);
783                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
784                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
785                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
786                 break;
787         default:
788                 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
789                 return -EFAULT;
790         }
791
792         /* Request template setup */
793         qat_alg_init_common_hdr(header, proto);
794         header->service_cmd_id = cdesc->qat_cmd;
795
796         /* Auth CD config setup */
797         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
798         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
799         hash_cd_ctrl->inner_res_sz = digestsize;
800         hash_cd_ctrl->final_sz = digestsize;
801         hash_cd_ctrl->inner_state1_sz = state1_size;
802         auth_param->auth_res_sz = digestsize;
803
804         hash_cd_ctrl->inner_state2_sz  = state2_size;
805         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
806                         ((sizeof(struct icp_qat_hw_auth_setup) +
807                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
808                                         >> 3);
809
810         cdesc->cd_cur_ptr += state1_size + state2_size;
811         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
812
813         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
814         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
815
816         return 0;
817 }
818
819 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
820 {
821         switch (key_len) {
822         case ICP_QAT_HW_AES_128_KEY_SZ:
823                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
824                 break;
825         case ICP_QAT_HW_AES_192_KEY_SZ:
826                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
827                 break;
828         case ICP_QAT_HW_AES_256_KEY_SZ:
829                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
830                 break;
831         default:
832                 return -EINVAL;
833         }
834         return 0;
835 }
836
837 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
838 {
839         switch (key_len) {
840         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
841                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846         return 0;
847 }
848
849 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
850 {
851         switch (key_len) {
852         case ICP_QAT_HW_KASUMI_KEY_SZ:
853                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
854                 break;
855         default:
856                 return -EINVAL;
857         }
858         return 0;
859 }
860
861 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
862 {
863         switch (key_len) {
864         case QAT_3DES_KEY_SZ_OPT1:
865         case QAT_3DES_KEY_SZ_OPT2:
866                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
867                 break;
868         default:
869                 return -EINVAL;
870         }
871         return 0;
872 }