crypto/qat: add DES capability
[dpdk.git] / drivers / crypto / qat / qat_adf / qat_algs_build_desc.c
1 /*
2  *  This file is provided under a dual BSD/GPLv2 license.  When using or
3  *  redistributing this file, you may do so under either license.
4  *
5  *  GPL LICENSE SUMMARY
6  *  Copyright(c) 2015-2016 Intel Corporation.
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of version 2 of the GNU General Public License as
9  *  published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope that it will be useful, but
12  *  WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  General Public License for more details.
15  *
16  *  Contact Information:
17  *  qat-linux@intel.com
18  *
19  *  BSD LICENSE
20  *  Copyright(c) 2015-2016 Intel Corporation.
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *      * Redistributions of source code must retain the above copyright
26  *        notice, this list of conditions and the following disclaimer.
27  *      * Redistributions in binary form must reproduce the above copyright
28  *        notice, this list of conditions and the following disclaimer in
29  *        the documentation and/or other materials provided with the
30  *        distribution.
31  *      * Neither the name of Intel Corporation nor the names of its
32  *        contributors may be used to endorse or promote products derived
33  *        from this software without specific prior written permission.
34  *
35  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46  */
47
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
52 #include <rte_log.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
55
56 #include "../qat_logs.h"
57 #include "qat_algs.h"
58
59 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
60 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
61 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
62
63
64 /*
65  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
66  * This is digest size rounded up to nearest quadword
67  */
68 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
69 {
70         switch (qat_hash_alg) {
71         case ICP_QAT_HW_AUTH_ALGO_SHA1:
72                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
73                                                 QAT_HW_DEFAULT_ALIGNMENT);
74         case ICP_QAT_HW_AUTH_ALGO_SHA224:
75                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
76                                                 QAT_HW_DEFAULT_ALIGNMENT);
77         case ICP_QAT_HW_AUTH_ALGO_SHA256:
78                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
79                                                 QAT_HW_DEFAULT_ALIGNMENT);
80         case ICP_QAT_HW_AUTH_ALGO_SHA384:
81                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
82                                                 QAT_HW_DEFAULT_ALIGNMENT);
83         case ICP_QAT_HW_AUTH_ALGO_SHA512:
84                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
85                                                 QAT_HW_DEFAULT_ALIGNMENT);
86         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
87                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
88                                                 QAT_HW_DEFAULT_ALIGNMENT);
89         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
90         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
91                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
92                                                 QAT_HW_DEFAULT_ALIGNMENT);
93         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
94                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
95                                                 QAT_HW_DEFAULT_ALIGNMENT);
96         case ICP_QAT_HW_AUTH_ALGO_MD5:
97                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
98                                                 QAT_HW_DEFAULT_ALIGNMENT);
99         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
100                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
101                                                 QAT_HW_DEFAULT_ALIGNMENT);
102         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
103                 /* return maximum state1 size in this case */
104                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
105                                                 QAT_HW_DEFAULT_ALIGNMENT);
106         default:
107                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
108                 return -EFAULT;
109         };
110         return -EFAULT;
111 }
112
113 /* returns digest size in bytes  per hash algo */
114 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
115 {
116         switch (qat_hash_alg) {
117         case ICP_QAT_HW_AUTH_ALGO_SHA1:
118                 return ICP_QAT_HW_SHA1_STATE1_SZ;
119         case ICP_QAT_HW_AUTH_ALGO_SHA224:
120                 return ICP_QAT_HW_SHA224_STATE1_SZ;
121         case ICP_QAT_HW_AUTH_ALGO_SHA256:
122                 return ICP_QAT_HW_SHA256_STATE1_SZ;
123         case ICP_QAT_HW_AUTH_ALGO_SHA384:
124                 return ICP_QAT_HW_SHA384_STATE1_SZ;
125         case ICP_QAT_HW_AUTH_ALGO_SHA512:
126                 return ICP_QAT_HW_SHA512_STATE1_SZ;
127         case ICP_QAT_HW_AUTH_ALGO_MD5:
128                 return ICP_QAT_HW_MD5_STATE1_SZ;
129         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
130                 /* return maximum digest size in this case */
131                 return ICP_QAT_HW_SHA512_STATE1_SZ;
132         default:
133                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
134                 return -EFAULT;
135         };
136         return -EFAULT;
137 }
138
139 /* returns block size in byes per hash algo */
140 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
141 {
142         switch (qat_hash_alg) {
143         case ICP_QAT_HW_AUTH_ALGO_SHA1:
144                 return SHA_CBLOCK;
145         case ICP_QAT_HW_AUTH_ALGO_SHA224:
146                 return SHA256_CBLOCK;
147         case ICP_QAT_HW_AUTH_ALGO_SHA256:
148                 return SHA256_CBLOCK;
149         case ICP_QAT_HW_AUTH_ALGO_SHA384:
150                 return SHA512_CBLOCK;
151         case ICP_QAT_HW_AUTH_ALGO_SHA512:
152                 return SHA512_CBLOCK;
153         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
154                 return 16;
155         case ICP_QAT_HW_AUTH_ALGO_MD5:
156                 return MD5_CBLOCK;
157         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
158                 /* return maximum block size in this case */
159                 return SHA512_CBLOCK;
160         default:
161                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
162                 return -EFAULT;
163         };
164         return -EFAULT;
165 }
166
167 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
168 {
169         SHA_CTX ctx;
170
171         if (!SHA1_Init(&ctx))
172                 return -EFAULT;
173         SHA1_Transform(&ctx, data_in);
174         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
175         return 0;
176 }
177
178 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
179 {
180         SHA256_CTX ctx;
181
182         if (!SHA224_Init(&ctx))
183                 return -EFAULT;
184         SHA256_Transform(&ctx, data_in);
185         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
186         return 0;
187 }
188
189 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
190 {
191         SHA256_CTX ctx;
192
193         if (!SHA256_Init(&ctx))
194                 return -EFAULT;
195         SHA256_Transform(&ctx, data_in);
196         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
197         return 0;
198 }
199
200 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
201 {
202         SHA512_CTX ctx;
203
204         if (!SHA384_Init(&ctx))
205                 return -EFAULT;
206         SHA512_Transform(&ctx, data_in);
207         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
208         return 0;
209 }
210
211 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
212 {
213         SHA512_CTX ctx;
214
215         if (!SHA512_Init(&ctx))
216                 return -EFAULT;
217         SHA512_Transform(&ctx, data_in);
218         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
219         return 0;
220 }
221
222 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
223 {
224         MD5_CTX ctx;
225
226         if (!MD5_Init(&ctx))
227                 return -EFAULT;
228         MD5_Transform(&ctx, data_in);
229         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
230
231         return 0;
232 }
233
234 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
235                         uint8_t *data_in,
236                         uint8_t *data_out)
237 {
238         int digest_size;
239         uint8_t digest[qat_hash_get_digest_size(
240                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
241         uint32_t *hash_state_out_be32;
242         uint64_t *hash_state_out_be64;
243         int i;
244
245         PMD_INIT_FUNC_TRACE();
246         digest_size = qat_hash_get_digest_size(hash_alg);
247         if (digest_size <= 0)
248                 return -EFAULT;
249
250         hash_state_out_be32 = (uint32_t *)data_out;
251         hash_state_out_be64 = (uint64_t *)data_out;
252
253         switch (hash_alg) {
254         case ICP_QAT_HW_AUTH_ALGO_SHA1:
255                 if (partial_hash_sha1(data_in, digest))
256                         return -EFAULT;
257                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
258                         *hash_state_out_be32 =
259                                 rte_bswap32(*(((uint32_t *)digest)+i));
260                 break;
261         case ICP_QAT_HW_AUTH_ALGO_SHA224:
262                 if (partial_hash_sha224(data_in, digest))
263                         return -EFAULT;
264                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
265                         *hash_state_out_be32 =
266                                 rte_bswap32(*(((uint32_t *)digest)+i));
267                 break;
268         case ICP_QAT_HW_AUTH_ALGO_SHA256:
269                 if (partial_hash_sha256(data_in, digest))
270                         return -EFAULT;
271                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
272                         *hash_state_out_be32 =
273                                 rte_bswap32(*(((uint32_t *)digest)+i));
274                 break;
275         case ICP_QAT_HW_AUTH_ALGO_SHA384:
276                 if (partial_hash_sha384(data_in, digest))
277                         return -EFAULT;
278                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
279                         *hash_state_out_be64 =
280                                 rte_bswap64(*(((uint64_t *)digest)+i));
281                 break;
282         case ICP_QAT_HW_AUTH_ALGO_SHA512:
283                 if (partial_hash_sha512(data_in, digest))
284                         return -EFAULT;
285                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
286                         *hash_state_out_be64 =
287                                 rte_bswap64(*(((uint64_t *)digest)+i));
288                 break;
289         case ICP_QAT_HW_AUTH_ALGO_MD5:
290                 if (partial_hash_md5(data_in, data_out))
291                         return -EFAULT;
292                 break;
293         default:
294                 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
295                 return -EFAULT;
296         }
297
298         return 0;
299 }
300 #define HMAC_IPAD_VALUE 0x36
301 #define HMAC_OPAD_VALUE 0x5c
302 #define HASH_XCBC_PRECOMP_KEY_NUM 3
303
304 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
305                                 const uint8_t *auth_key,
306                                 uint16_t auth_keylen,
307                                 uint8_t *p_state_buf,
308                                 uint16_t *p_state_len)
309 {
310         int block_size;
311         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
312         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
313         int i;
314
315         PMD_INIT_FUNC_TRACE();
316         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
317                 static uint8_t qat_aes_xcbc_key_seed[
318                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
319                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
320                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
321                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
322                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
323                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
324                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
325                 };
326
327                 uint8_t *in = NULL;
328                 uint8_t *out = p_state_buf;
329                 int x;
330                 AES_KEY enc_key;
331
332                 in = rte_zmalloc("working mem for key",
333                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
334                 rte_memcpy(in, qat_aes_xcbc_key_seed,
335                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
336                 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
337                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
338                                 &enc_key) != 0) {
339                                 rte_free(in -
340                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
341                                 memset(out -
342                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
343                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
344                                 return -EFAULT;
345                         }
346                         AES_encrypt(in, out, &enc_key);
347                         in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
348                         out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
349                 }
350                 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
351                 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
352                 return 0;
353         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
354                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
355                 uint8_t *in = NULL;
356                 uint8_t *out = p_state_buf;
357                 AES_KEY enc_key;
358
359                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
360                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
361                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
362                 in = rte_zmalloc("working mem for key",
363                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
364                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
365                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
366                         &enc_key) != 0) {
367                         return -EFAULT;
368                 }
369                 AES_encrypt(in, out, &enc_key);
370                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
371                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
372                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
373                 rte_free(in);
374                 return 0;
375         }
376
377         block_size = qat_hash_get_block_size(hash_alg);
378         if (block_size <= 0)
379                 return -EFAULT;
380         /* init ipad and opad from key and xor with fixed values */
381         memset(ipad, 0, block_size);
382         memset(opad, 0, block_size);
383
384         if (auth_keylen > (unsigned int)block_size) {
385                 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
386                 return -EFAULT;
387         }
388         rte_memcpy(ipad, auth_key, auth_keylen);
389         rte_memcpy(opad, auth_key, auth_keylen);
390
391         for (i = 0; i < block_size; i++) {
392                 uint8_t *ipad_ptr = ipad + i;
393                 uint8_t *opad_ptr = opad + i;
394                 *ipad_ptr ^= HMAC_IPAD_VALUE;
395                 *opad_ptr ^= HMAC_OPAD_VALUE;
396         }
397
398         /* do partial hash of ipad and copy to state1 */
399         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
400                 memset(ipad, 0, block_size);
401                 memset(opad, 0, block_size);
402                 PMD_DRV_LOG(ERR, "ipad precompute failed");
403                 return -EFAULT;
404         }
405
406         /*
407          * State len is a multiple of 8, so may be larger than the digest.
408          * Put the partial hash of opad state_len bytes after state1
409          */
410         *p_state_len = qat_hash_get_state1_size(hash_alg);
411         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
412                 memset(ipad, 0, block_size);
413                 memset(opad, 0, block_size);
414                 PMD_DRV_LOG(ERR, "opad precompute failed");
415                 return -EFAULT;
416         }
417
418         /*  don't leave data lying around */
419         memset(ipad, 0, block_size);
420         memset(opad, 0, block_size);
421         return 0;
422 }
423
/*
 * Initialise the common request header of a firmware LA (lookaside)
 * bulk request template.
 *
 * @header: request header to initialise (fields are overwritten)
 * @proto:  LA protocol flag (e.g. SNOW 3G, or ICP_QAT_FW_LA_NO_PROTO)
 *
 * Sets flat-pointer/64-bit-CD addressing, disables partial processing,
 * stateful updates and in-buffer digests. Callers adjust further
 * serv_specif_flags (e.g. RET/CMP auth) afterwards as needed.
 */
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
		uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	/* Each request is complete in itself: no partial packets */
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				proto);
	/* Stateless operation: no state carried between requests */
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}
445
/*
 * Build the cipher part of the session content descriptor.
 *
 * Fills in the cipher cd_ctrl fields, the cipher config word and the
 * cipher key material at cdesc->cd_cur_ptr, advancing that pointer past
 * what was written. Must be called with cdesc->qat_cmd set to a command
 * that includes a cipher (CIPHER, CIPHER_HASH or HASH_CIPHER).
 *
 * @cdesc:        session descriptor being built
 * @cipherkey:    cipher key bytes from the application
 * @cipherkeylen: length of @cipherkey in bytes
 *
 * Returns 0 on success, -EFAULT on an invalid command type.
 */
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cipher and auth cd_ctrl headers overlay the same memory */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;
	PMD_INIT_FUNC_TRACE();

	/* Chain the firmware slices according to the command type */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		/* Cipher-only: no auth result to return or compare */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* HASH_CIPHER is handled by the auth-side builder;
		 * anything else is not a cipher command.
		 */
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		/* Decrypt with block ciphers needs the converted key */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizes (in bytes here, quadwords below) */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		/* keep whatever proto the auth side may have set already */
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	} else {
		/* AES family: key size is exactly what the caller supplied */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	/* Firmware sizes/offsets are expressed in quadwords (>> 3) */
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;

	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 needs the key followed by the modified key */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	/* Pad the key field up to the hardware key size if needed */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		else
			memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	/* Content descriptor size in quadwords, rounded up */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
579
580 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
581                                                 uint8_t *authkey,
582                                                 uint32_t authkeylen,
583                                                 uint32_t add_auth_data_length,
584                                                 uint32_t digestsize,
585                                                 unsigned int operation)
586 {
587         struct icp_qat_hw_auth_setup *hash;
588         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
589         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
590         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
591         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
592         void *ptr = &req_tmpl->cd_ctrl;
593         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
594         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
595         struct icp_qat_fw_la_auth_req_params *auth_param =
596                 (struct icp_qat_fw_la_auth_req_params *)
597                 ((char *)&req_tmpl->serv_specif_rqpars +
598                 sizeof(struct icp_qat_fw_la_cipher_req_params));
599         uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/SNOW 3G */
600         uint16_t state1_size = 0, state2_size = 0;
601         uint16_t hash_offset, cd_size;
602         uint32_t *aad_len = NULL;
603         uint32_t wordIndex  = 0;
604         uint32_t *pTempKey;
605
606         PMD_INIT_FUNC_TRACE();
607
608         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
609                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
610                                         ICP_QAT_FW_SLICE_AUTH);
611                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
612                                         ICP_QAT_FW_SLICE_DRAM_WR);
613                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
614         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
615                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
616                                 ICP_QAT_FW_SLICE_AUTH);
617                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
618                                 ICP_QAT_FW_SLICE_CIPHER);
619                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
620                                 ICP_QAT_FW_SLICE_CIPHER);
621                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
622                                 ICP_QAT_FW_SLICE_DRAM_WR);
623                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
624         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
625                 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
626                 return -EFAULT;
627         }
628
629         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
630                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
631                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
632                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
633                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
634                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
635         } else {
636                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
637                                            ICP_QAT_FW_LA_RET_AUTH_RES);
638                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
639                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
640                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
641         }
642
643         /*
644          * Setup the inner hash config
645          */
646         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
647         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
648         hash->auth_config.reserved = 0;
649         hash->auth_config.config =
650                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
651                                 cdesc->qat_hash_alg, digestsize);
652
653         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
654                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
655                 hash->auth_counter.counter = 0;
656         else
657                 hash->auth_counter.counter = rte_bswap32(
658                                 qat_hash_get_block_size(cdesc->qat_hash_alg));
659
660         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
661
662         /*
663          * cd_cur_ptr now points at the state1 information.
664          */
665         switch (cdesc->qat_hash_alg) {
666         case ICP_QAT_HW_AUTH_ALGO_SHA1:
667                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
668                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
669                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
670                         return -EFAULT;
671                 }
672                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
673                 break;
674         case ICP_QAT_HW_AUTH_ALGO_SHA224:
675                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
676                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
677                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
678                         return -EFAULT;
679                 }
680                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
681                 break;
682         case ICP_QAT_HW_AUTH_ALGO_SHA256:
683                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
684                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
685                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
686                         return -EFAULT;
687                 }
688                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
689                 break;
690         case ICP_QAT_HW_AUTH_ALGO_SHA384:
691                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
692                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
693                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
694                         return -EFAULT;
695                 }
696                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
697                 break;
698         case ICP_QAT_HW_AUTH_ALGO_SHA512:
699                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
700                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
701                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
702                         return -EFAULT;
703                 }
704                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
705                 break;
706         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
707                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
708                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
709                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
710                         &state2_size)) {
711                         PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
712                         return -EFAULT;
713                 }
714                 break;
715         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
716         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
717                 proto = ICP_QAT_FW_LA_GCM_PROTO;
718                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
719                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
720                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
721                         &state2_size)) {
722                         PMD_DRV_LOG(ERR, "(GCM)precompute failed");
723                         return -EFAULT;
724                 }
725                 /*
726                  * Write (the length of AAD) into bytes 16-19 of state2
727                  * in big-endian format. This field is 8 bytes
728                  */
729                 auth_param->u2.aad_sz =
730                                 RTE_ALIGN_CEIL(add_auth_data_length, 16);
731                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
732
733                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
734                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
735                                         ICP_QAT_HW_GALOIS_H_SZ);
736                 *aad_len = rte_bswap32(add_auth_data_length);
737                 break;
738         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
739                 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
740                 state1_size = qat_hash_get_state1_size(
741                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
742                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
743                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
744
745                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
746                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
747                 cipherconfig->cipher_config.val =
748                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
749                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
750                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
751                         ICP_QAT_HW_CIPHER_ENCRYPT);
752                 memcpy(cipherconfig->key, authkey, authkeylen);
753                 memset(cipherconfig->key + authkeylen,
754                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
755                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
756                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
757                 auth_param->hash_state_sz =
758                                 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
759                 break;
760         case ICP_QAT_HW_AUTH_ALGO_MD5:
761                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
762                         authkey, authkeylen, cdesc->cd_cur_ptr,
763                         &state1_size)) {
764                         PMD_DRV_LOG(ERR, "(MD5)precompute failed");
765                         return -EFAULT;
766                 }
767                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
768                 break;
769         case ICP_QAT_HW_AUTH_ALGO_NULL:
770                 break;
771         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
772                 state1_size = qat_hash_get_state1_size(
773                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
774                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
775                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
776                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
777                                                         + authkeylen);
778                 /*
779                 * The Inner Hash Initial State2 block must contain IK
780                 * (Initialisation Key), followed by IK XOR-ed with KM
781                 * (Key Modifier): IK||(IK^KM).
782                 */
783                 /* write the auth key */
784                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
785                 /* initialise temp key with auth key */
786                 memcpy(pTempKey, authkey, authkeylen);
787                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
788                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
789                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
790                 break;
791         default:
792                 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
793                 return -EFAULT;
794         }
795
796         /* Request template setup */
797         qat_alg_init_common_hdr(header, proto);
798         header->service_cmd_id = cdesc->qat_cmd;
799
800         /* Auth CD config setup */
801         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
802         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
803         hash_cd_ctrl->inner_res_sz = digestsize;
804         hash_cd_ctrl->final_sz = digestsize;
805         hash_cd_ctrl->inner_state1_sz = state1_size;
806         auth_param->auth_res_sz = digestsize;
807
808         hash_cd_ctrl->inner_state2_sz  = state2_size;
809         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
810                         ((sizeof(struct icp_qat_hw_auth_setup) +
811                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
812                                         >> 3);
813
814         cdesc->cd_cur_ptr += state1_size + state2_size;
815         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
816
817         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
818         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
819
820         return 0;
821 }
822
823 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
824 {
825         switch (key_len) {
826         case ICP_QAT_HW_AES_128_KEY_SZ:
827                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
828                 break;
829         case ICP_QAT_HW_AES_192_KEY_SZ:
830                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
831                 break;
832         case ICP_QAT_HW_AES_256_KEY_SZ:
833                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
834                 break;
835         default:
836                 return -EINVAL;
837         }
838         return 0;
839 }
840
841 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
842 {
843         switch (key_len) {
844         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
845                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
846                 break;
847         default:
848                 return -EINVAL;
849         }
850         return 0;
851 }
852
853 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
854 {
855         switch (key_len) {
856         case ICP_QAT_HW_KASUMI_KEY_SZ:
857                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
858                 break;
859         default:
860                 return -EINVAL;
861         }
862         return 0;
863 }
864
865 int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
866 {
867         switch (key_len) {
868         case ICP_QAT_HW_DES_KEY_SZ:
869                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
870                 break;
871         default:
872                 return -EINVAL;
873         }
874         return 0;
875 }
876
877 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
878 {
879         switch (key_len) {
880         case QAT_3DES_KEY_SZ_OPT1:
881         case QAT_3DES_KEY_SZ_OPT2:
882                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
883                 break;
884         default:
885                 return -EINVAL;
886         }
887         return 0;
888 }