crypto/qat: add aes-sha384-hmac capability
[dpdk.git] drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
/*
 *  This file is provided under a dual BSD/GPLv2 license.  When using or
 *  redistributing this file, you may do so under either license.
 *
 *  GPL LICENSE SUMMARY
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *  qat-linux@intel.com
 *
 *  BSD LICENSE
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      * Redistributions in binary form must reproduce the above copyright
 *        notice, this list of conditions and the following disclaimer in
 *        the documentation and/or other materials provided with the
 *        distribution.
 *      * Neither the name of Intel Corporation nor the names of its
 *        contributors may be used to endorse or promote products derived
 *        from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>        /* Needed to calculate pre-compute values */
#include <openssl/aes.h>        /* Needed to calculate pre-compute values */
#include <openssl/md5.h>        /* Needed to calculate pre-compute values */

/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum state1 size in this case */
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                QAT_HW_DEFAULT_ALIGNMENT);
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}
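
/*
 * Worked example of the rounding above (illustrative; the exact sizes come
 * from the QAT hardware headers, so treat the numbers as assumptions):
 * SHA-1 state1 is 20 bytes and, with QAT_HW_DEFAULT_ALIGNMENT of 8, is
 * rounded up to 24 bytes, whereas SHA-256 state1 (32 bytes) is already
 * quadword aligned and is returned unchanged.
 */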

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return ICP_QAT_HW_SHA224_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return ICP_QAT_HW_SHA384_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return ICP_QAT_HW_MD5_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum digest size in this case */
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return SHA_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return SHA256_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return SHA256_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
                return 16;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return MD5_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum block size in this case */
                return SHA512_CBLOCK;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

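/*
 * Note on the partial_hash_* helpers below: each runs a single
 * compression-function pass (SHAx_Transform()/MD5_Transform()) over one
 * block of input and then copies the raw internal state words out of the
 * OpenSSL context, relying on the *_CTX structures starting with those
 * state words. No length padding or finalisation is applied; the result is
 * the intermediate state the hardware continues from when computing the
 * HMAC inner and outer hashes.
 */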
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
        SHA_CTX ctx;

        if (!SHA1_Init(&ctx))
                return -EFAULT;
        SHA1_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
        SHA256_CTX ctx;

        if (!SHA224_Init(&ctx))
                return -EFAULT;
        SHA256_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
        SHA256_CTX ctx;

        if (!SHA256_Init(&ctx))
                return -EFAULT;
        SHA256_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
        SHA512_CTX ctx;

        if (!SHA384_Init(&ctx))
                return -EFAULT;
        SHA512_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
        SHA512_CTX ctx;

        if (!SHA512_Init(&ctx))
                return -EFAULT;
        SHA512_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
        MD5_CTX ctx;

        if (!MD5_Init(&ctx))
                return -EFAULT;
        MD5_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

        return 0;
}

static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
                        uint8_t *data_in,
                        uint8_t *data_out)
{
        int digest_size;
        uint8_t digest[qat_hash_get_digest_size(
                        ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint32_t *hash_state_out_be32;
        uint64_t *hash_state_out_be64;
        int i;

        PMD_INIT_FUNC_TRACE();
        digest_size = qat_hash_get_digest_size(hash_alg);
        if (digest_size <= 0)
                return -EFAULT;

        hash_state_out_be32 = (uint32_t *)data_out;
        hash_state_out_be64 = (uint64_t *)data_out;

        switch (hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (partial_hash_sha1(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                if (partial_hash_sha224(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (partial_hash_sha256(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                if (partial_hash_sha384(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
                        *hash_state_out_be64 =
                                rte_bswap64(*(((uint64_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (partial_hash_sha512(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
                        *hash_state_out_be64 =
                                rte_bswap64(*(((uint64_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                if (partial_hash_md5(data_in, data_out))
                        return -EFAULT;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
                return -EFAULT;
        }

        return 0;
}
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

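/*
 * qat_alg_do_precomputes() builds the per-session authentication state the
 * hardware expects. For the HMAC algorithms it stores the partial hash of
 * (key XOR ipad) as state1 and the partial hash of (key XOR opad) directly
 * after it, per RFC 2104, so the hardware only has to continue hashing the
 * message. AES-XCBC-MAC instead derives its K1/K2/K3 subkeys by encrypting
 * the fixed seed below, and GCM derives the GHASH key H by encrypting an
 * all-zero block.
 */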
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
                                const uint8_t *auth_key,
                                uint16_t auth_keylen,
                                uint8_t *p_state_buf,
                                uint16_t *p_state_len)
{
        int block_size;
        uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        int i;

        PMD_INIT_FUNC_TRACE();
        if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
                static uint8_t qat_aes_xcbc_key_seed[
                                        ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                };

                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                int x;
                AES_KEY enc_key;

                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory");
                        return -ENOMEM;
                }

                rte_memcpy(in, qat_aes_xcbc_key_seed,
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
                        if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                                &enc_key) != 0) {
                                rte_free(in -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
                                memset(out -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
                                        0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                                return -EFAULT;
                        }
                        AES_encrypt(in, out, &enc_key);
                        in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                        out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                }
                *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
                rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
                return 0;
        } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
                (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                AES_KEY enc_key;

                memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ);
                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_GALOIS_H_SZ, 16);
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory");
                        return -ENOMEM;
                }

                memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
                if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                        &enc_key) != 0) {
                        rte_free(in);
                        return -EFAULT;
                }
                AES_encrypt(in, out, &enc_key);
                *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ;
                rte_free(in);
                return 0;
        }

        block_size = qat_hash_get_block_size(hash_alg);
        if (block_size <= 0)
                return -EFAULT;
        /* init ipad and opad from key and xor with fixed values */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);

        if (auth_keylen > (unsigned int)block_size) {
                PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
                return -EFAULT;
        }
        rte_memcpy(ipad, auth_key, auth_keylen);
        rte_memcpy(opad, auth_key, auth_keylen);

        for (i = 0; i < block_size; i++) {
                uint8_t *ipad_ptr = ipad + i;
                uint8_t *opad_ptr = opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        /* do partial hash of ipad and copy to state1 */
        if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "ipad precompute failed");
                return -EFAULT;
        }

        /*
         * State len is a multiple of 8, so may be larger than the digest.
         * Put the partial hash of opad state_len bytes after state1
         */
        *p_state_len = qat_hash_get_state1_size(hash_alg);
        if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "opad precompute failed");
                return -EFAULT;
        }

        /* don't leave data lying around */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        return 0;
}

void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
                uint16_t proto)
{
        PMD_INIT_FUNC_TRACE();
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                        QAT_COMN_PTR_TYPE_FLAT);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                proto);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
                                                uint8_t *cipherkey,
                                                uint32_t cipherkeylen)
{
        struct icp_qat_hw_cipher_algo_blk *cipher;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        enum icp_qat_hw_cipher_convert key_convert;
        uint32_t total_key_size;
        uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/Snow3G */
        uint16_t cipher_offset, cd_size;

        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
                return -EFAULT;
        }

        if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
                /*
                 * CTR Streaming ciphers are a special case. Decrypt = encrypt
                 * Overriding default values previously set
                 */
                cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
        else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        else
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

        if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
                total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                cipher_cd_ctrl->cipher_state_sz =
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
                proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
        } else {
                total_key_size = cipherkeylen;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
                proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
        }
        cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
        cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

        header->service_cmd_id = cdesc->qat_cmd;
        qat_alg_init_common_hdr(header, proto);

        cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
        cipher->aes.cipher_config.val =
            ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
                                        cdesc->qat_cipher_alg, key_convert,
                                        cdesc->qat_dir);
        memcpy(cipher->aes.key, cipherkey, cipherkeylen);
        cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                        cipherkeylen;
        if (total_key_size > cipherkeylen) {
                uint32_t padding_size = total_key_size-cipherkeylen;

                memset(cdesc->cd_cur_ptr, 0, padding_size);
                cdesc->cd_cur_ptr += padding_size;
        }
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}
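
/*
 * Layout of the cipher content descriptor built above (derived from the
 * code; offsets are relative to cdesc->cd):
 *
 *      [ icp_qat_hw_cipher_config ][ cipher key ][ zero padding up to
 *        total_key_size, e.g. key + IV size for SNOW 3G UEA2 ]
 *
 * The cipher_cfg_offset and the key/state sizes written into the cd_ctrl
 * header are expressed in quadwords, hence the >> 3 shifts.
 */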

int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
                                                uint8_t *authkey,
                                                uint32_t authkeylen,
                                                uint32_t add_auth_data_length,
                                                uint32_t digestsize,
                                                unsigned int operation)
{
        struct icp_qat_hw_auth_setup *hash;
        struct icp_qat_hw_cipher_algo_blk *cipherconfig;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));
        uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/Snow3G */
        uint16_t state1_size = 0, state2_size = 0;
        uint16_t hash_offset, cd_size;
        uint32_t *aad_len = NULL;

        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
                return -EFAULT;
        }

        if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_CMP_AUTH_RES);
        } else {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        }

        /*
         * Setup the inner hash config
         */
        hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
        hash->auth_config.reserved = 0;
        hash->auth_config.config =
                        ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                cdesc->qat_hash_alg, digestsize);

        if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
                hash->auth_counter.counter = 0;
        else
                hash->auth_counter.counter = rte_bswap32(
                                qat_hash_get_block_size(cdesc->qat_hash_alg));

        cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

616          * cd_cur_ptr now points at the state1 information.
617          */
618         switch (cdesc->qat_hash_alg) {
619         case ICP_QAT_HW_AUTH_ALGO_SHA1:
620                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
621                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
622                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
623                         return -EFAULT;
624                 }
625                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
626                 break;
627         case ICP_QAT_HW_AUTH_ALGO_SHA224:
628                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
629                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
630                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
631                         return -EFAULT;
632                 }
633                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
634                 break;
635         case ICP_QAT_HW_AUTH_ALGO_SHA256:
636                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
637                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
638                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
639                         return -EFAULT;
640                 }
641                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
642                 break;
643         case ICP_QAT_HW_AUTH_ALGO_SHA384:
644                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
645                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
646                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
647                         return -EFAULT;
648                 }
649                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
650                 break;
651         case ICP_QAT_HW_AUTH_ALGO_SHA512:
652                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
653                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
654                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
655                         return -EFAULT;
656                 }
657                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
658                 break;
659         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
660                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
661                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
662                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
663                         &state2_size)) {
664                         PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
665                         return -EFAULT;
666                 }
667                 break;
668         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
669         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
670                 proto = ICP_QAT_FW_LA_GCM_PROTO;
671                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
672                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
673                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
674                         &state2_size)) {
675                         PMD_DRV_LOG(ERR, "(GCM)precompute failed");
676                         return -EFAULT;
677                 }
678                 /*
679                  * Write (the length of AAD) into bytes 16-19 of state2
680                  * in big-endian format. This field is 8 bytes
681                  */
682                 auth_param->u2.aad_sz =
683                                 RTE_ALIGN_CEIL(add_auth_data_length, 16);
684                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
685
686                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
687                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
688                                         ICP_QAT_HW_GALOIS_H_SZ);
689                 *aad_len = rte_bswap32(add_auth_data_length);
690                 break;
691         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
692                 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
693                 state1_size = qat_hash_get_state1_size(
694                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
695                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
696                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
697
698                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
699                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
700                 cipherconfig->aes.cipher_config.val =
701                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
702                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
703                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
704                         ICP_QAT_HW_CIPHER_ENCRYPT);
705                 memcpy(cipherconfig->aes.key, authkey, authkeylen);
706                 memset(cipherconfig->aes.key + authkeylen,
707                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
708                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
709                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
710                 auth_param->hash_state_sz =
711                                 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
712                 break;
713         case ICP_QAT_HW_AUTH_ALGO_MD5:
714                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
715                         authkey, authkeylen, cdesc->cd_cur_ptr,
716                         &state1_size)) {
717                         PMD_DRV_LOG(ERR, "(MD5)precompute failed");
718                         return -EFAULT;
719                 }
720                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
721                 break;
722         default:
723                 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
724                 return -EFAULT;
725         }

        /* Request template setup */
        qat_alg_init_common_hdr(header, proto);
        header->service_cmd_id = cdesc->qat_cmd;

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;
        hash_cd_ctrl->inner_state1_sz = state1_size;
        auth_param->auth_res_sz = digestsize;

        hash_cd_ctrl->inner_state2_sz = state2_size;
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
                                        >> 3);

        cdesc->cd_cur_ptr += state1_size + state2_size;
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}
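
/*
 * Layout of the auth content descriptor built above (derived from the
 * code): an icp_qat_hw_auth_setup block, followed by state1 (the inner/ipad
 * partial hash or algorithm-specific state) and state2 (the outer/opad
 * partial hash, the XCBC subkeys, or the GHASH H value plus AAD length).
 * inner_state2_offset is again counted in quadwords from hash_cfg_offset.
 */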

static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
                                        struct icp_qat_hw_cipher_algo_blk *cd,
                                        const uint8_t *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

        PMD_INIT_FUNC_TRACE();
        rte_memcpy(cd->aes.key, key, keylen);
        qat_alg_init_common_hdr(header, ICP_QAT_FW_LA_NO_PROTO);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
        struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        PMD_INIT_FUNC_TRACE();
        qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}

void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
        struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        PMD_INIT_FUNC_TRACE();
        qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}

int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_AES_128_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        case ICP_QAT_HW_AES_192_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                break;
        case ICP_QAT_HW_AES_256_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
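
/*
 * Usage sketch (illustrative only; 'key_length' and the surrounding session
 * code are assumptions, not part of this file): a session-setup path would
 * typically validate the key size before building the content descriptor,
 * e.g.
 *
 *      enum icp_qat_hw_cipher_algo alg;
 *
 *      if (qat_alg_validate_aes_key(key_length, &alg) != 0)
 *              return -EINVAL;
 *
 * reporting an unsupported AES key size, and otherwise record 'alg' in the
 * qat_session before calling
 * qat_alg_aead_session_create_content_desc_cipher().
 */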