6de695e97e10eb8dce2ba8720292ba366a165d6e
[dpdk.git] / drivers / crypto / qat / qat_adf / qat_algs_build_desc.c
1 /*
2  *  This file is provided under a dual BSD/GPLv2 license.  When using or
3  *  redistributing this file, you may do so under either license.
4  *
5  *  GPL LICENSE SUMMARY
6  *  Copyright(c) 2015-2016 Intel Corporation.
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of version 2 of the GNU General Public License as
9  *  published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope that it will be useful, but
12  *  WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  General Public License for more details.
15  *
16  *  Contact Information:
17  *  qat-linux@intel.com
18  *
19  *  BSD LICENSE
20  *  Copyright(c) 2015-2016 Intel Corporation.
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *      * Redistributions of source code must retain the above copyright
26  *        notice, this list of conditions and the following disclaimer.
27  *      * Redistributions in binary form must reproduce the above copyright
28  *        notice, this list of conditions and the following disclaimer in
29  *        the documentation and/or other materials provided with the
30  *        distribution.
31  *      * Neither the name of Intel Corporation nor the names of its
32  *        contributors may be used to endorse or promote products derived
33  *        from this software without specific prior written permission.
34  *
35  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46  */
47
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
52 #include <rte_log.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
55
56 #include "../qat_logs.h"
57 #include "qat_algs.h"
58
59 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
60 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
61 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
62
63
64 /*
65  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
66  * This is digest size rounded up to nearest quadword
67  */
68 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
69 {
70         switch (qat_hash_alg) {
71         case ICP_QAT_HW_AUTH_ALGO_SHA1:
72                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
73                                                 QAT_HW_DEFAULT_ALIGNMENT);
74         case ICP_QAT_HW_AUTH_ALGO_SHA256:
75                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
76                                                 QAT_HW_DEFAULT_ALIGNMENT);
77         case ICP_QAT_HW_AUTH_ALGO_SHA512:
78                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
79                                                 QAT_HW_DEFAULT_ALIGNMENT);
80         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
81                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
82                                                 QAT_HW_DEFAULT_ALIGNMENT);
83         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
84         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
85                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
86                                                 QAT_HW_DEFAULT_ALIGNMENT);
87         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
88                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
89                                                 QAT_HW_DEFAULT_ALIGNMENT);
90         case ICP_QAT_HW_AUTH_ALGO_MD5:
91                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
92                                                 QAT_HW_DEFAULT_ALIGNMENT);
93         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
94                 /* return maximum state1 size in this case */
95                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
96                                                 QAT_HW_DEFAULT_ALIGNMENT);
97         default:
98                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
99                 return -EFAULT;
100         };
101         return -EFAULT;
102 }
103
104 /* returns digest size in bytes  per hash algo */
105 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
106 {
107         switch (qat_hash_alg) {
108         case ICP_QAT_HW_AUTH_ALGO_SHA1:
109                 return ICP_QAT_HW_SHA1_STATE1_SZ;
110         case ICP_QAT_HW_AUTH_ALGO_SHA256:
111                 return ICP_QAT_HW_SHA256_STATE1_SZ;
112         case ICP_QAT_HW_AUTH_ALGO_SHA512:
113                 return ICP_QAT_HW_SHA512_STATE1_SZ;
114         case ICP_QAT_HW_AUTH_ALGO_MD5:
115                 return ICP_QAT_HW_MD5_STATE1_SZ;
116         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
117                 /* return maximum digest size in this case */
118                 return ICP_QAT_HW_SHA512_STATE1_SZ;
119         default:
120                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
121                 return -EFAULT;
122         };
123         return -EFAULT;
124 }
125
126 /* returns block size in byes per hash algo */
127 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
128 {
129         switch (qat_hash_alg) {
130         case ICP_QAT_HW_AUTH_ALGO_SHA1:
131                 return SHA_CBLOCK;
132         case ICP_QAT_HW_AUTH_ALGO_SHA256:
133                 return SHA256_CBLOCK;
134         case ICP_QAT_HW_AUTH_ALGO_SHA512:
135                 return SHA512_CBLOCK;
136         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
137                 return 16;
138         case ICP_QAT_HW_AUTH_ALGO_MD5:
139                 return MD5_CBLOCK;
140         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
141                 /* return maximum block size in this case */
142                 return SHA512_CBLOCK;
143         default:
144                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
145                 return -EFAULT;
146         };
147         return -EFAULT;
148 }
149
150 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
151 {
152         SHA_CTX ctx;
153
154         if (!SHA1_Init(&ctx))
155                 return -EFAULT;
156         SHA1_Transform(&ctx, data_in);
157         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
158         return 0;
159 }
160
161 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
162 {
163         SHA256_CTX ctx;
164
165         if (!SHA256_Init(&ctx))
166                 return -EFAULT;
167         SHA256_Transform(&ctx, data_in);
168         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
169         return 0;
170 }
171
172 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
173 {
174         SHA512_CTX ctx;
175
176         if (!SHA512_Init(&ctx))
177                 return -EFAULT;
178         SHA512_Transform(&ctx, data_in);
179         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
180         return 0;
181 }
182
183 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
184 {
185         MD5_CTX ctx;
186
187         if (!MD5_Init(&ctx))
188                 return -EFAULT;
189         MD5_Transform(&ctx, data_in);
190         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
191
192         return 0;
193 }
194
/*
 * Compute a partial (one compression round) hash of one block of input and
 * write the intermediate state to data_out in the big-endian word order the
 * QAT hardware expects. MD5 state is copied as-is; the SHA states are
 * byte-swapped word by word (32-bit words for SHA1/SHA256, 64-bit for
 * SHA512). Returns 0 on success, -EFAULT on any failure.
 */
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	/* Scratch buffer sized for the largest supported digest (SHA512) */
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	/* Two views of the same output buffer; which one is used depends
	 * on the hash's native word size.
	 */
	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		/* Swap each 32-bit state word to big-endian */
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		/* Swap each 32-bit state word to big-endian */
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		/* SHA512 state words are 64-bit, hence the 64-bit swap */
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 state needs no byte swapping; write directly */
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
/* HMAC inner/outer pad bytes XORed with the key (RFC 2104) */
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
/* AES-XCBC-MAC derives three subkeys (K1, K2, K3) from the base key */
#define HASH_XCBC_PRECOMP_KEY_NUM 3
250
251 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
252                                 const uint8_t *auth_key,
253                                 uint16_t auth_keylen,
254                                 uint8_t *p_state_buf,
255                                 uint16_t *p_state_len)
256 {
257         int block_size;
258         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
259         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
260         int i;
261
262         PMD_INIT_FUNC_TRACE();
263         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
264                 static uint8_t qat_aes_xcbc_key_seed[
265                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
266                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
267                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
268                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
269                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
270                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
271                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
272                 };
273
274                 uint8_t *in = NULL;
275                 uint8_t *out = p_state_buf;
276                 int x;
277                 AES_KEY enc_key;
278
279                 in = rte_zmalloc("working mem for key",
280                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
281                 rte_memcpy(in, qat_aes_xcbc_key_seed,
282                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
283                 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
284                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
285                                 &enc_key) != 0) {
286                                 rte_free(in -
287                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
288                                 memset(out -
289                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
290                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
291                                 return -EFAULT;
292                         }
293                         AES_encrypt(in, out, &enc_key);
294                         in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
295                         out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
296                 }
297                 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
298                 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
299                 return 0;
300         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
301                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
302                 uint8_t *in = NULL;
303                 uint8_t *out = p_state_buf;
304                 AES_KEY enc_key;
305
306                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
307                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
308                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
309                 in = rte_zmalloc("working mem for key",
310                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
311                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
312                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
313                         &enc_key) != 0) {
314                         return -EFAULT;
315                 }
316                 AES_encrypt(in, out, &enc_key);
317                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
318                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
319                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
320                 rte_free(in);
321                 return 0;
322         }
323
324         block_size = qat_hash_get_block_size(hash_alg);
325         if (block_size <= 0)
326                 return -EFAULT;
327         /* init ipad and opad from key and xor with fixed values */
328         memset(ipad, 0, block_size);
329         memset(opad, 0, block_size);
330
331         if (auth_keylen > (unsigned int)block_size) {
332                 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
333                 return -EFAULT;
334         }
335         rte_memcpy(ipad, auth_key, auth_keylen);
336         rte_memcpy(opad, auth_key, auth_keylen);
337
338         for (i = 0; i < block_size; i++) {
339                 uint8_t *ipad_ptr = ipad + i;
340                 uint8_t *opad_ptr = opad + i;
341                 *ipad_ptr ^= HMAC_IPAD_VALUE;
342                 *opad_ptr ^= HMAC_OPAD_VALUE;
343         }
344
345         /* do partial hash of ipad and copy to state1 */
346         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
347                 memset(ipad, 0, block_size);
348                 memset(opad, 0, block_size);
349                 PMD_DRV_LOG(ERR, "ipad precompute failed");
350                 return -EFAULT;
351         }
352
353         /*
354          * State len is a multiple of 8, so may be larger than the digest.
355          * Put the partial hash of opad state_len bytes after state1
356          */
357         *p_state_len = qat_hash_get_state1_size(hash_alg);
358         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
359                 memset(ipad, 0, block_size);
360                 memset(opad, 0, block_size);
361                 PMD_DRV_LOG(ERR, "opad precompute failed");
362                 return -EFAULT;
363         }
364
365         /*  don't leave data lying around */
366         memset(ipad, 0, block_size);
367         memset(opad, 0, block_size);
368         return 0;
369 }
370
/*
 * Initialise the common request header used by all QAT lookaside (LA)
 * requests built by this driver: LA service type, flat (non-SGL) pointers,
 * 64-bit content-descriptor addressing, no partial/stateful processing,
 * 16-byte IV field, and the protocol (e.g. GCM/Snow3G or none) chosen by
 * the caller.
 */
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
		uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	/* Whole packets only - no partial processing */
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				proto);
	/* Stateless operation - firmware keeps no state between requests */
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
390
/*
 * Build the cipher portion of the session content descriptor: set up the
 * firmware slice chain for the cipher-only / cipher-then-hash commands,
 * append the cipher config and key at cd_cur_ptr, and record offsets/sizes
 * (in quadwords) in the cipher cd_ctrl header.
 * Returns 0 on success, -EFAULT if qat_cmd is not a cipher command.
 */
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cd_ctrl is shared storage viewed as either cipher or auth header */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t cipher_offset, cd_size;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* Cipher-only: single CIPHER slice, no auth result */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* Cipher-then-hash: chain CIPHER slice into AUTH slice */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}
	/* NOTE(review): for HASH_CIPHER we fall through without setting
	 * cd_cur_ptr here - presumably the auth setup ran first; verify
	 * against the session-creation call order.
	 */

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		/* Snow3G carries its IV in the key area; sizes in quadwords */
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		/* keep whatever protocol the auth setup already selected */
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	/* Offset of the cipher config within the CD, in quadwords */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	/* Write cipher config + key at the current CD position */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher->aes.cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);
	memcpy(cipher->aes.key, cipherkey, cipherkeylen);
	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
			cipherkeylen;
	/* Zero-pad when the HW expects a larger key area (e.g. Snow3G IV) */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;

		memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	/* Total CD size, rounded up to quadwords for the firmware */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
487
/*
 * Build the auth portion of the session content descriptor: configure the
 * firmware slice chain for auth-only / hash-then-cipher commands, write the
 * inner hash setup plus the precomputed state1/state2 material at
 * cd_cur_ptr, and fill in the auth cd_ctrl header and request-template
 * auth parameters.
 * Returns 0 on success, -EFAULT on an invalid command/algorithm or if a
 * precompute fails.
 */
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t add_auth_data_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cd_ctrl is shared storage viewed as either cipher or auth header */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	/* Auth request params live right after the cipher request params */
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t state1_size = 0, state2_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		/* Auth-only: single AUTH slice */
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* Hash-then-cipher: chain AUTH slice into CIPHER slice */
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		/* Verify: HW compares the digest, does not return it */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_CMP_AUTH_RES);
	} else {
		/* Generate: HW returns the digest, no comparison */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	}

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);

	/* Counter holds the hash block size (big-endian); 0 for Snow3G */
	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
		hash->auth_counter.counter = 0;
	else
		hash->auth_counter.counter = rte_bswap32(
				qat_hash_get_block_size(cdesc->qat_hash_alg));

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		/* HMAC precompute fills state1 (and opad after it) */
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		/* Derived subkeys go into state2, after the fixed state1 */
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		/* GCM: hash subkey H goes into state2, after state1 */
		proto = ICP_QAT_FW_LA_GCM_PROTO;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(add_auth_data_length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		/* Snow3G: zeroed state followed by an embedded cipher
		 * config holding the UIA2 key (IV area zero-padded).
		 */
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->aes.cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->aes.key, authkey, authkeylen);
		memset(cipherconfig->aes.key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Request template setup */
	qat_alg_init_common_hdr(header, proto);
	header->service_cmd_id = cdesc->qat_cmd;

	/* Auth CD config setup - offsets/sizes in quadwords where >> 3 */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz  = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size;
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
687
688 static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
689                                         struct icp_qat_hw_cipher_algo_blk *cd,
690                                         const uint8_t *key, unsigned int keylen)
691 {
692         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
693         struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
694         struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
695
696         PMD_INIT_FUNC_TRACE();
697         rte_memcpy(cd->aes.key, key, keylen);
698         qat_alg_init_common_hdr(header, ICP_QAT_FW_LA_NO_PROTO);
699         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
700         cd_pars->u.s.content_desc_params_sz =
701                                 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
702         /* Cipher CD config setup */
703         cd_ctrl->cipher_key_sz = keylen >> 3;
704         cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
705         cd_ctrl->cipher_cfg_offset = 0;
706         ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
707         ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
708 }
709
710 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
711                                         int alg, const uint8_t *key,
712                                         unsigned int keylen)
713 {
714         struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
715         struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
716         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
717
718         PMD_INIT_FUNC_TRACE();
719         qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
720         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
721         enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
722 }
723
724 void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
725                                         int alg, const uint8_t *key,
726                                         unsigned int keylen)
727 {
728         struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
729         struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
730         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
731
732         PMD_INIT_FUNC_TRACE();
733         qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
734         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
735         dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
736 }
737
738 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
739 {
740         switch (key_len) {
741         case ICP_QAT_HW_AES_128_KEY_SZ:
742                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
743                 break;
744         case ICP_QAT_HW_AES_192_KEY_SZ:
745                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
746                 break;
747         case ICP_QAT_HW_AES_256_KEY_SZ:
748                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
749                 break;
750         default:
751                 return -EINVAL;
752         }
753         return 0;
754 }
755
756 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
757 {
758         switch (key_len) {
759         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
760                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
761                 break;
762         default:
763                 return -EINVAL;
764         }
765         return 0;
766 }