crypto/qat: make the session struct variable in size
[dpdk.git] / drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
/*
 *  This file is provided under a dual BSD/GPLv2 license.  When using or
 *  redistributing this file, you may do so under either license.
 *
 *  GPL LICENSE SUMMARY
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *  qat-linux@intel.com
 *
 *  BSD LICENSE
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      * Redistributions in binary form must reproduce the above copyright
 *        notice, this list of conditions and the following disclaimer in
 *        the documentation and/or other materials provided with the
 *        distribution.
 *      * Neither the name of Intel Corporation nor the names of its
 *        contributors may be used to endorse or promote products derived
 *        from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>        /* Needed to calculate pre-compute values */
#include <openssl/aes.h>        /* Needed to calculate pre-compute values */


/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum state1 size in this case */
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum digest size in this case */
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return SHA_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return SHA256_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
                return 16;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum block size in this case */
                return SHA512_CBLOCK;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

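/*
 * Single-block hash helpers used for the HMAC pre-computes: each one runs
 * the compression function over exactly one block of input and copies the
 * intermediate state words straight out of the OpenSSL context (the state
 * words are the leading members of SHA_CTX/SHA256_CTX/SHA512_CTX). The
 * result is a raw hash state, not a finalised digest.
 */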
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
        SHA_CTX ctx;

        if (!SHA1_Init(&ctx))
                return -EFAULT;
        SHA1_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
        SHA256_CTX ctx;

        if (!SHA256_Init(&ctx))
                return -EFAULT;
        SHA256_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
        SHA512_CTX ctx;

        if (!SHA512_Init(&ctx))
                return -EFAULT;
        SHA512_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
        return 0;
}

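/*
 * Run the single-block transform for the given algorithm and byte-swap the
 * resulting state words (32-bit words for SHA-1/SHA-256, 64-bit words for
 * SHA-512) into the big-endian layout stored in the content descriptor.
 */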
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
                        uint8_t *data_in,
                        uint8_t *data_out)
{
        int digest_size;
        uint8_t digest[qat_hash_get_digest_size(
                        ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint32_t *hash_state_out_be32;
        uint64_t *hash_state_out_be64;
        int i;

        PMD_INIT_FUNC_TRACE();
        digest_size = qat_hash_get_digest_size(hash_alg);
        if (digest_size <= 0)
                return -EFAULT;

        hash_state_out_be32 = (uint32_t *)data_out;
        hash_state_out_be64 = (uint64_t *)data_out;

        switch (hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (partial_hash_sha1(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (partial_hash_sha256(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (partial_hash_sha512(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
                        *hash_state_out_be64 =
                                rte_bswap64(*(((uint64_t *)digest)+i));
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
                return -EFAULT;
        }

        return 0;
}
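/* HMAC inner/outer pad bytes (RFC 2104) and number of AES-XCBC subkeys */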
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

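/*
 * Pre-compute the per-session authentication state that gets embedded in
 * the content descriptor:
 *  - AES-XCBC-MAC: derive the three subkeys (RFC 3566 K1/K2/K3) by
 *    encrypting the constant 0x01../0x02../0x03.. blocks with the user key.
 *  - GCM (GALOIS_128/64): compute the hash key H = AES(key, 0^128) and zero
 *    the adjacent len(A) and E(K, CTR0) areas included in *p_state_len.
 *  - HMAC (SHA-1/256/512): store the partial hash of (key XOR ipad) at the
 *    start of the buffer and the partial hash of (key XOR opad)
 *    qat_hash_get_state1_size() bytes after it.
 */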
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
                                const uint8_t *auth_key,
                                uint16_t auth_keylen,
                                uint8_t *p_state_buf,
                                uint16_t *p_state_len)
{
        int block_size;
        uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        int i;

        PMD_INIT_FUNC_TRACE();
        if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
                static uint8_t qat_aes_xcbc_key_seed[
                                        ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                };

                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                int x;
                AES_KEY enc_key;

                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
                rte_memcpy(in, qat_aes_xcbc_key_seed,
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
                        if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                                &enc_key) != 0) {
                                rte_free(in -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
                                memset(out -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
                                        0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                                return -EFAULT;
                        }
                        AES_encrypt(in, out, &enc_key);
                        in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                        out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                }
                *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
                rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
                return 0;
        } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
                (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                AES_KEY enc_key;

                memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ);
                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_GALOIS_H_SZ, 16);
                memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
                if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                        &enc_key) != 0) {
                        return -EFAULT;
                }
                AES_encrypt(in, out, &enc_key);
                *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ;
                rte_free(in);
                return 0;
        }

        block_size = qat_hash_get_block_size(hash_alg);
        if (block_size <= 0)
                return -EFAULT;
        /* init ipad and opad from key and xor with fixed values */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);

        if (auth_keylen > (unsigned int)block_size) {
                PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
                return -EFAULT;
        }
        rte_memcpy(ipad, auth_key, auth_keylen);
        rte_memcpy(opad, auth_key, auth_keylen);

        for (i = 0; i < block_size; i++) {
                uint8_t *ipad_ptr = ipad + i;
                uint8_t *opad_ptr = opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        /* do partial hash of ipad and copy to state1 */
        if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "ipad precompute failed");
                return -EFAULT;
        }

        /*
         * State len is a multiple of 8, so may be larger than the digest.
         * Put the partial hash of opad state_len bytes after state1
         */
        *p_state_len = qat_hash_get_state1_size(hash_alg);
        if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "opad precompute failed");
                return -EFAULT;
        }

        /* don't leave data lying around */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        return 0;
}

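/*
 * Fill in the request header fields shared by every request built from a
 * session template: LA service, flat (non-SGL) buffer pointers, 64-bit
 * content descriptor address, 16-byte IV field, no partial-packet
 * processing and no state update; only the protocol flag varies per caller.
 */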
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
                uint16_t proto)
{
        PMD_INIT_FUNC_TRACE();
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                        QAT_COMN_PTR_TYPE_FLAT);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                proto);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

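/*
 * Build the cipher half of the content descriptor in place in cdesc->cd:
 * program the slice chain (CIPHER -> DRAM for cipher-only, CIPHER -> AUTH
 * for cipher-then-hash), pick the key-conversion behaviour, then append the
 * cipher config word and key, zero-padded up to key + IV size for SNOW 3G.
 * Offsets and sizes written to cd_ctrl/cd_pars are in quadwords (>> 3).
 */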
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
                                                uint8_t *cipherkey,
                                                uint32_t cipherkeylen)
{
        struct icp_qat_hw_cipher_algo_blk *cipher;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        enum icp_qat_hw_cipher_convert key_convert;
        uint32_t total_key_size;
        uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/Snow3G */
        uint16_t cipher_offset, cd_size;

        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
                return -EFAULT;
        }

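        /*
         * Select how the firmware handles the key: CTR mode and plain
         * encrypt use the key as supplied, SNOW 3G and block-mode decrypt
         * request a key conversion.
         */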
        if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
                /*
                 * CTR Streaming ciphers are a special case. Decrypt = encrypt
                 * Overriding default values previously set
                 */
                cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
        else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        else
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

        if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
                total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                cipher_cd_ctrl->cipher_state_sz =
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
                proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
        } else {
                total_key_size = cipherkeylen;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
                proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
        }
        cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
        cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

        header->service_cmd_id = cdesc->qat_cmd;
        qat_alg_init_common_hdr(header, proto);

        cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
        cipher->aes.cipher_config.val =
            ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
                                        cdesc->qat_cipher_alg, key_convert,
                                        cdesc->qat_dir);
        memcpy(cipher->aes.key, cipherkey, cipherkeylen);
        cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                        cipherkeylen;
        if (total_key_size > cipherkeylen) {
                uint32_t padding_size = total_key_size-cipherkeylen;

                memset(cdesc->cd_cur_ptr, 0, padding_size);
                cdesc->cd_cur_ptr += padding_size;
        }
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}

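/*
 * Build the authentication half of the content descriptor: program the
 * slice chain (AUTH -> DRAM for hash-only, AUTH -> CIPHER for
 * hash-then-cipher), set the digest return/compare flags for generate vs
 * verify, write the icp_qat_hw_auth_setup block followed by the
 * pre-computed state1/state2 for the selected algorithm, then fill in the
 * hash cd_ctrl sizes and offsets (again in quadwords).
 */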
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
                                                uint8_t *authkey,
                                                uint32_t authkeylen,
                                                uint32_t add_auth_data_length,
                                                uint32_t digestsize,
                                                unsigned int operation)
{
        struct icp_qat_hw_auth_setup *hash;
        struct icp_qat_hw_cipher_algo_blk *cipherconfig;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));
        uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/Snow3G */
        uint16_t state1_size = 0, state2_size = 0;
        uint16_t hash_offset, cd_size;
        uint32_t *aad_len = NULL;

        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
                return -EFAULT;
        }

        if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_CMP_AUTH_RES);
        } else {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        }

        /*
         * Setup the inner hash config
         */
        hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
        hash->auth_config.reserved = 0;
        hash->auth_config.config =
                        ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                cdesc->qat_hash_alg, digestsize);

        if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
                hash->auth_counter.counter = 0;
        else
                hash->auth_counter.counter = rte_bswap32(
                                qat_hash_get_block_size(cdesc->qat_hash_alg));

        cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

        /*
         * cd_cur_ptr now points at the state1 information.
         */
        switch (cdesc->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
                        authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
                        &state2_size)) {
                        PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
                        return -EFAULT;
                }
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                proto = ICP_QAT_FW_LA_GCM_PROTO;
                state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
                if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
                        authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
                        &state2_size)) {
                        PMD_DRV_LOG(ERR, "(GCM)precompute failed");
                        return -EFAULT;
                }
                /*
                 * Write (the length of AAD) into bytes 16-19 of state2
                 * in big-endian format. This field is 8 bytes
                 */
                auth_param->u2.aad_sz =
                                RTE_ALIGN_CEIL(add_auth_data_length, 16);
                auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

                aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
                                        ICP_QAT_HW_GALOIS_128_STATE1_SZ +
                                        ICP_QAT_HW_GALOIS_H_SZ);
                *aad_len = rte_bswap32(add_auth_data_length);
                break;
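        /*
         * SNOW 3G UIA2 has no pre-computed state: state1/state2 are zeroed
         * and the authentication key is instead carried in a cipher config
         * block (followed by a zeroed IV area) appended after state2.
         */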
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
                proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
                state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
                memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

                cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
                                (cdesc->cd_cur_ptr + state1_size + state2_size);
                cipherconfig->aes.cipher_config.val =
                ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
                        ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
                        ICP_QAT_HW_CIPHER_KEY_CONVERT,
                        ICP_QAT_HW_CIPHER_ENCRYPT);
                memcpy(cipherconfig->aes.key, authkey, authkeylen);
                memset(cipherconfig->aes.key + authkeylen,
                                0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
                cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                                authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                auth_param->hash_state_sz =
                                RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
                return -EFAULT;
        }

        /* Request template setup */
        qat_alg_init_common_hdr(header, proto);
        header->service_cmd_id = cdesc->qat_cmd;

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;
        hash_cd_ctrl->inner_state1_sz = state1_size;
        auth_param->auth_res_sz = digestsize;

        hash_cd_ctrl->inner_state2_sz = state2_size;
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
                                        >> 3);

        cdesc->cd_cur_ptr += state1_size + state2_size;
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}

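/*
 * ablkcipher-style helpers: build a minimal cipher-only CBC content
 * descriptor; the enc/dec variants below differ only in the direction
 * encoded in the cipher config word.
 */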
static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
                                        struct icp_qat_hw_cipher_algo_blk *cd,
                                        const uint8_t *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

        PMD_INIT_FUNC_TRACE();
        rte_memcpy(cd->aes.key, key, keylen);
        qat_alg_init_common_hdr(header, ICP_QAT_FW_LA_NO_PROTO);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
        struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        PMD_INIT_FUNC_TRACE();
        qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}

void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
        struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        PMD_INIT_FUNC_TRACE();
        qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}

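/*
 * Map a key length in bytes onto the matching QAT cipher algorithm enum
 * (AES-128/192/256 here, SNOW 3G UEA2 below); other lengths are rejected
 * with -EINVAL.
 */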
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_AES_128_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        case ICP_QAT_HW_AES_192_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                break;
        case ICP_QAT_HW_AES_256_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}