86e2f2641acfe1ae9df5f57647da97a06f1a1c05
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2019 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
24
/* The tables below are the big-endian byte encodings of the initial hash
 * values (H0..Hn) defined in FIPS 180-2; they are fed to the hardware when
 * pre-computing HMAC inner/outer states.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2
 * (full 8-word internal state, even though the digest is truncated)
 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2
 * (full 8x64-bit internal state, even though the digest is truncated)
 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
/* Write the cipher section of the session's content descriptor from the
 * raw cipher key (static - defined later in this file).
 */
static int
qat_sym_cd_cipher_set(struct qat_sym_session *cd,
						const uint8_t *enckey,
						uint32_t enckeylen);

/* Write the auth section of the session's content descriptor, including
 * any pre-computed HMAC state (static - defined later in this file).
 */
static int
qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation);
72
73 /** Frees a context previously created
74  *  Depends on openssl libcrypto
75  */
76 static void
77 bpi_cipher_ctx_free(void *bpi_ctx)
78 {
79         if (bpi_ctx != NULL)
80                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
81 }
82
83 /** Creates a context in either AES or DES in ECB mode
84  *  Depends on openssl libcrypto
85  */
86 static int
87 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
88                 enum rte_crypto_cipher_operation direction __rte_unused,
89                 const uint8_t *key, uint16_t key_length, void **ctx)
90 {
91         const EVP_CIPHER *algo = NULL;
92         int ret;
93         *ctx = EVP_CIPHER_CTX_new();
94
95         if (*ctx == NULL) {
96                 ret = -ENOMEM;
97                 goto ctx_init_err;
98         }
99
100         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
101                 algo = EVP_des_ecb();
102         else
103                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
104                         algo = EVP_aes_128_ecb();
105                 else
106                         algo = EVP_aes_256_ecb();
107
108         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
109         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
110                 ret = -EINVAL;
111                 goto ctx_init_err;
112         }
113
114         return 0;
115
116 ctx_init_err:
117         if (*ctx != NULL)
118                 EVP_CIPHER_CTX_free(*ctx);
119         return ret;
120 }
121
122 static int
123 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
124                 struct qat_sym_dev_private *internals)
125 {
126         int i = 0;
127         const struct rte_cryptodev_capabilities *capability;
128
129         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
130                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
131                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
132                         continue;
133
134                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
135                         continue;
136
137                 if (capability->sym.cipher.algo == algo)
138                         return 1;
139         }
140         return 0;
141 }
142
143 static int
144 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
145                 struct qat_sym_dev_private *internals)
146 {
147         int i = 0;
148         const struct rte_cryptodev_capabilities *capability;
149
150         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
151                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
152                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
153                         continue;
154
155                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
156                         continue;
157
158                 if (capability->sym.auth.algo == algo)
159                         return 1;
160         }
161         return 0;
162 }
163
164 void
165 qat_sym_session_clear(struct rte_cryptodev *dev,
166                 struct rte_cryptodev_sym_session *sess)
167 {
168         uint8_t index = dev->driver_id;
169         void *sess_priv = get_sym_session_private_data(sess, index);
170         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
171
172         if (sess_priv) {
173                 if (s->bpi_ctx)
174                         bpi_cipher_ctx_free(s->bpi_ctx);
175                 memset(s, 0, qat_sym_session_get_private_size(dev));
176                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
177
178                 set_sym_session_private_data(sess, index, NULL);
179                 rte_mempool_put(sess_mp, sess_priv);
180         }
181 }
182
183 static int
184 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
185 {
186         /* Cipher Only */
187         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
188                 return ICP_QAT_FW_LA_CMD_CIPHER;
189
190         /* Authentication Only */
191         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
192                 return ICP_QAT_FW_LA_CMD_AUTH;
193
194         /* AEAD */
195         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
196                 /* AES-GCM and AES-CCM works with different direction
197                  * GCM first encrypts and generate hash where AES-CCM
198                  * first generate hash and encrypts. Similar relation
199                  * applies to decryption.
200                  */
201                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
202                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
203                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
204                         else
205                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
206                 else
207                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
208                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
209                         else
210                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
211         }
212
213         if (xform->next == NULL)
214                 return -1;
215
216         /* Cipher then Authenticate */
217         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
218                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
219                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
220
221         /* Authenticate then Cipher */
222         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
223                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
224                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
225
226         return -1;
227 }
228
229 static struct rte_crypto_auth_xform *
230 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
231 {
232         do {
233                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
234                         return &xform->auth;
235
236                 xform = xform->next;
237         } while (xform);
238
239         return NULL;
240 }
241
242 static struct rte_crypto_cipher_xform *
243 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
244 {
245         do {
246                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
247                         return &xform->cipher;
248
249                 xform = xform->next;
250         } while (xform);
251
252         return NULL;
253 }
254
255 int
256 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
257                 struct rte_crypto_sym_xform *xform,
258                 struct qat_sym_session *session)
259 {
260         struct qat_sym_dev_private *internals = dev->data->dev_private;
261         struct rte_crypto_cipher_xform *cipher_xform = NULL;
262         enum qat_device_gen qat_dev_gen =
263                                 internals->qat_dev->qat_dev_gen;
264         int ret;
265
266         /* Get cipher xform from crypto xform chain */
267         cipher_xform = qat_get_cipher_xform(xform);
268
269         session->cipher_iv.offset = cipher_xform->iv.offset;
270         session->cipher_iv.length = cipher_xform->iv.length;
271
272         switch (cipher_xform->algo) {
273         case RTE_CRYPTO_CIPHER_AES_CBC:
274                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
275                                 &session->qat_cipher_alg) != 0) {
276                         QAT_LOG(ERR, "Invalid AES cipher key size");
277                         ret = -EINVAL;
278                         goto error_out;
279                 }
280                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
281                 break;
282         case RTE_CRYPTO_CIPHER_AES_CTR:
283                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
284                                 &session->qat_cipher_alg) != 0) {
285                         QAT_LOG(ERR, "Invalid AES cipher key size");
286                         ret = -EINVAL;
287                         goto error_out;
288                 }
289                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
290                 if (qat_dev_gen == QAT_GEN4)
291                         session->is_ucs = 1;
292                 break;
293         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
294                 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
295                                         &session->qat_cipher_alg) != 0) {
296                         QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
297                         ret = -EINVAL;
298                         goto error_out;
299                 }
300                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
301                 break;
302         case RTE_CRYPTO_CIPHER_NULL:
303                 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
304                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
305                 break;
306         case RTE_CRYPTO_CIPHER_KASUMI_F8:
307                 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
308                                         &session->qat_cipher_alg) != 0) {
309                         QAT_LOG(ERR, "Invalid KASUMI cipher key size");
310                         ret = -EINVAL;
311                         goto error_out;
312                 }
313                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
314                 break;
315         case RTE_CRYPTO_CIPHER_3DES_CBC:
316                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
317                                 &session->qat_cipher_alg) != 0) {
318                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
319                         ret = -EINVAL;
320                         goto error_out;
321                 }
322                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
323                 break;
324         case RTE_CRYPTO_CIPHER_DES_CBC:
325                 if (qat_sym_validate_des_key(cipher_xform->key.length,
326                                 &session->qat_cipher_alg) != 0) {
327                         QAT_LOG(ERR, "Invalid DES cipher key size");
328                         ret = -EINVAL;
329                         goto error_out;
330                 }
331                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
332                 break;
333         case RTE_CRYPTO_CIPHER_3DES_CTR:
334                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
335                                 &session->qat_cipher_alg) != 0) {
336                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
337                         ret = -EINVAL;
338                         goto error_out;
339                 }
340                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
341                 break;
342         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
343                 ret = bpi_cipher_ctx_init(
344                                         cipher_xform->algo,
345                                         cipher_xform->op,
346                                         cipher_xform->key.data,
347                                         cipher_xform->key.length,
348                                         &session->bpi_ctx);
349                 if (ret != 0) {
350                         QAT_LOG(ERR, "failed to create DES BPI ctx");
351                         goto error_out;
352                 }
353                 if (qat_sym_validate_des_key(cipher_xform->key.length,
354                                 &session->qat_cipher_alg) != 0) {
355                         QAT_LOG(ERR, "Invalid DES cipher key size");
356                         ret = -EINVAL;
357                         goto error_out;
358                 }
359                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
360                 break;
361         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
362                 ret = bpi_cipher_ctx_init(
363                                         cipher_xform->algo,
364                                         cipher_xform->op,
365                                         cipher_xform->key.data,
366                                         cipher_xform->key.length,
367                                         &session->bpi_ctx);
368                 if (ret != 0) {
369                         QAT_LOG(ERR, "failed to create AES BPI ctx");
370                         goto error_out;
371                 }
372                 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
373                                 &session->qat_cipher_alg) != 0) {
374                         QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
375                         ret = -EINVAL;
376                         goto error_out;
377                 }
378                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
379                 break;
380         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
381                 if (!qat_is_cipher_alg_supported(
382                         cipher_xform->algo, internals)) {
383                         QAT_LOG(ERR, "%s not supported on this device",
384                                 rte_crypto_cipher_algorithm_strings
385                                         [cipher_xform->algo]);
386                         ret = -ENOTSUP;
387                         goto error_out;
388                 }
389                 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
390                                 &session->qat_cipher_alg) != 0) {
391                         QAT_LOG(ERR, "Invalid ZUC cipher key size");
392                         ret = -EINVAL;
393                         goto error_out;
394                 }
395                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
396                 break;
397         case RTE_CRYPTO_CIPHER_AES_XTS:
398                 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
399                         QAT_LOG(ERR, "AES-XTS-192 not supported");
400                         ret = -EINVAL;
401                         goto error_out;
402                 }
403                 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
404                                 &session->qat_cipher_alg) != 0) {
405                         QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
406                         ret = -EINVAL;
407                         goto error_out;
408                 }
409                 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
410                 break;
411         case RTE_CRYPTO_CIPHER_3DES_ECB:
412         case RTE_CRYPTO_CIPHER_AES_ECB:
413         case RTE_CRYPTO_CIPHER_AES_F8:
414         case RTE_CRYPTO_CIPHER_ARC4:
415                 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
416                                 cipher_xform->algo);
417                 ret = -ENOTSUP;
418                 goto error_out;
419         default:
420                 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
421                                 cipher_xform->algo);
422                 ret = -EINVAL;
423                 goto error_out;
424         }
425
426         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
427                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
428         else
429                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
430
431         if (qat_sym_cd_cipher_set(session,
432                                                 cipher_xform->key.data,
433                                                 cipher_xform->key.length)) {
434                 ret = -EINVAL;
435                 goto error_out;
436         }
437
438         return 0;
439
440 error_out:
441         if (session->bpi_ctx) {
442                 bpi_cipher_ctx_free(session->bpi_ctx);
443                 session->bpi_ctx = NULL;
444         }
445         return ret;
446 }
447
448 int
449 qat_sym_session_configure(struct rte_cryptodev *dev,
450                 struct rte_crypto_sym_xform *xform,
451                 struct rte_cryptodev_sym_session *sess,
452                 struct rte_mempool *mempool)
453 {
454         void *sess_private_data;
455         int ret;
456
457         if (rte_mempool_get(mempool, &sess_private_data)) {
458                 CDEV_LOG_ERR(
459                         "Couldn't get object from session mempool");
460                 return -ENOMEM;
461         }
462
463         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
464         if (ret != 0) {
465                 QAT_LOG(ERR,
466                     "Crypto QAT PMD: failed to configure session parameters");
467
468                 /* Return session to mempool */
469                 rte_mempool_put(mempool, sess_private_data);
470                 return ret;
471         }
472
473         set_sym_session_private_data(sess, dev->driver_id,
474                 sess_private_data);
475
476         return 0;
477 }
478
479 static void
480 qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
481                 uint8_t hash_flag)
482 {
483         struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
484         struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
485                         (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
486                         session->fw_req.cd_ctrl.content_desc_ctrl_lw;
487
488         /* Set the Use Extended Protocol Flags bit in LW 1 */
489         QAT_FIELD_SET(header->comn_req_flags,
490                         QAT_COMN_EXT_FLAGS_USED,
491                         QAT_COMN_EXT_FLAGS_BITPOS,
492                         QAT_COMN_EXT_FLAGS_MASK);
493
494         /* Set Hash Flags in LW 28 */
495         cd_ctrl->hash_flags |= hash_flag;
496
497         /* Set proto flags in LW 1 */
498         switch (session->qat_cipher_alg) {
499         case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
500                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
501                                 ICP_QAT_FW_LA_SNOW_3G_PROTO);
502                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
503                                 header->serv_specif_flags, 0);
504                 break;
505         case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
506                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
507                                 ICP_QAT_FW_LA_NO_PROTO);
508                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
509                                 header->serv_specif_flags,
510                                 ICP_QAT_FW_LA_ZUC_3G_PROTO);
511                 break;
512         default:
513                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
514                                 ICP_QAT_FW_LA_NO_PROTO);
515                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
516                                 header->serv_specif_flags, 0);
517                 break;
518         }
519 }
520
521 static void
522 qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
523                 struct qat_sym_session *session)
524 {
525         const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
526         enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
527                         QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
528
529         if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
530                         session->qat_cipher_alg !=
531                         ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
532                 session->min_qat_dev_gen = min_dev_gen;
533                 qat_sym_session_set_ext_hash_flags(session,
534                         1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
535         } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
536                         session->qat_cipher_alg !=
537                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
538                 session->min_qat_dev_gen = min_dev_gen;
539                 qat_sym_session_set_ext_hash_flags(session,
540                         1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
541         } else if ((session->aes_cmac ||
542                         session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
543                         (session->qat_cipher_alg ==
544                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
545                         session->qat_cipher_alg ==
546                         ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
547                 session->min_qat_dev_gen = min_dev_gen;
548                 qat_sym_session_set_ext_hash_flags(session, 0);
549         }
550 }
551
552 int
553 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
554                 struct rte_crypto_sym_xform *xform, void *session_private)
555 {
556         struct qat_sym_session *session = session_private;
557         struct qat_sym_dev_private *internals = dev->data->dev_private;
558         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
559         int ret;
560         int qat_cmd_id;
561
562         /* Verify the session physical address is known */
563         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
564         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
565                 QAT_LOG(ERR,
566                         "Session physical address unknown. Bad memory pool.");
567                 return -EINVAL;
568         }
569
570         memset(session, 0, sizeof(*session));
571         /* Set context descriptor physical address */
572         session->cd_paddr = session_paddr +
573                         offsetof(struct qat_sym_session, cd);
574
575         session->min_qat_dev_gen = QAT_GEN1;
576         session->is_ucs = 0;
577
578         /* Get requested QAT command id */
579         qat_cmd_id = qat_get_cmd_id(xform);
580         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
581                 QAT_LOG(ERR, "Unsupported xform chain requested");
582                 return -ENOTSUP;
583         }
584         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
585         switch (session->qat_cmd) {
586         case ICP_QAT_FW_LA_CMD_CIPHER:
587                 ret = qat_sym_session_configure_cipher(dev, xform, session);
588                 if (ret < 0)
589                         return ret;
590                 break;
591         case ICP_QAT_FW_LA_CMD_AUTH:
592                 ret = qat_sym_session_configure_auth(dev, xform, session);
593                 if (ret < 0)
594                         return ret;
595                 session->is_single_pass_gmac =
596                                qat_dev_gen == QAT_GEN3 &&
597                                xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
598                                xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
599                 break;
600         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
601                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
602                         ret = qat_sym_session_configure_aead(dev, xform,
603                                         session);
604                         if (ret < 0)
605                                 return ret;
606                 } else {
607                         ret = qat_sym_session_configure_cipher(dev,
608                                         xform, session);
609                         if (ret < 0)
610                                 return ret;
611                         ret = qat_sym_session_configure_auth(dev,
612                                         xform, session);
613                         if (ret < 0)
614                                 return ret;
615                         /* Special handling of mixed hash+cipher algorithms */
616                         qat_sym_session_handle_mixed(dev, session);
617                 }
618                 break;
619         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
620                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
621                         ret = qat_sym_session_configure_aead(dev, xform,
622                                         session);
623                         if (ret < 0)
624                                 return ret;
625                 } else {
626                         ret = qat_sym_session_configure_auth(dev,
627                                         xform, session);
628                         if (ret < 0)
629                                 return ret;
630                         ret = qat_sym_session_configure_cipher(dev,
631                                         xform, session);
632                         if (ret < 0)
633                                 return ret;
634                         /* Special handling of mixed hash+cipher algorithms */
635                         qat_sym_session_handle_mixed(dev, session);
636                 }
637                 break;
638         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
639         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
640         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
641         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
642         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
643         case ICP_QAT_FW_LA_CMD_MGF1:
644         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
645         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
646         case ICP_QAT_FW_LA_CMD_DELIMITER:
647         QAT_LOG(ERR, "Unsupported Service %u",
648                 session->qat_cmd);
649                 return -ENOTSUP;
650         default:
651         QAT_LOG(ERR, "Unsupported Service %u",
652                 session->qat_cmd);
653                 return -ENOTSUP;
654         }
655
656         return 0;
657 }
658
/* Convert an AEAD session into a GEN3 single-pass (SPC) request: the whole
 * operation is issued as one CIPHER command with the hash folded into the
 * cipher config, instead of a chained cipher+hash request.
 * Returns 0 on success, -EINVAL if the cipher content descriptor cannot
 * be written.
 */
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
		struct rte_crypto_aead_xform *aead_xform)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *) &session->fw_req.serv_specif_rqpars;

	session->is_single_pass = 1;
	/* Single-pass is only available from GEN3 hardware onwards */
	session->min_qat_dev_gen = QAT_GEN3;
	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
		/* Tell firmware the GCM IV is the standard 12 octets */
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
	} else {
		/* Chacha-Poly is special case that use QAT CTR mode */
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	}
	session->cipher_iv.offset = aead_xform->iv.offset;
	session->cipher_iv.length = aead_xform->iv.length;
	if (qat_sym_cd_cipher_set(session,
			aead_xform->key.data, aead_xform->key.length))
		return -EINVAL;
	session->aad_len = aead_xform->aad_length;
	session->digest_length = aead_xform->digest_length;
	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		/* Encrypt: generate the tag and return it in the response */
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
		ICP_QAT_FW_LA_RET_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_RET_AUTH_RES);
	} else {
		/* Decrypt: compare the tag against the supplied digest */
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
		ICP_QAT_FW_LA_CMP_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_CMP_AUTH_RES);
	}
	/* Single-pass uses the SPC proto flag and no legacy proto */
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
	session->fw_req.comn_hdr.service_cmd_id =
			ICP_QAT_FW_LA_CMD_CIPHER;
	/* Fold direction/algorithm and digest length into the cipher
	 * config word; AAD length goes into the upper config word.
	 */
	session->cd.cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				session->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				session->qat_dir);
	QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
			aead_xform->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	session->cd.cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
				aead_xform->aad_length);
	cipher_param->spc_aad_sz = aead_xform->aad_length;
	cipher_param->spc_auth_res_sz = aead_xform->digest_length;

	return 0;
}
724
/**
 * Parse an authentication transform and populate the QAT session with the
 * matching hardware hash algorithm, auth mode and content descriptor(s).
 *
 * For AES-GMAC the session is temporarily configured as a combined
 * cipher+hash command so both cipher and auth descriptor content can be
 * built (order depends on GENERATE vs VERIFY), then restored to a plain
 * auth command.
 *
 * @param dev      cryptodev owning the session (used for the ZUC
 *                 capability check)
 * @param xform    transform chain; the auth xform is extracted from it
 * @param session  QAT session to populate
 * @return 0 on success, -EINVAL on invalid parameters or descriptor
 *         setup failure, -ENOTSUP for unsupported algorithms
 */
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	/* NOTE(review): key.length narrowed to uint8_t here - presumably no
	 * supported auth key exceeds 255 bytes; confirm against capabilities.
	 */
	uint8_t key_length = auth_xform->key.length;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	/* Default to MODE1; plain (unkeyed) hash cases override to MODE0 */
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		/* CMAC reuses the XCBC hardware algo; aes_cmac selects the
		 * CMAC-specific subkey precompute path.
		 */
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		/* ZUC support is per-device; reject if this device lacks it */
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */

			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_cd_auth_set(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */

			if (qat_sym_cd_auth_set(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_sym_cd_auth_set(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}
884
885 int
886 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
887                                 struct rte_crypto_sym_xform *xform,
888                                 struct qat_sym_session *session)
889 {
890         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
891         enum rte_crypto_auth_operation crypto_operation;
892         struct qat_sym_dev_private *internals =
893                         dev->data->dev_private;
894         enum qat_device_gen qat_dev_gen =
895                         internals->qat_dev->qat_dev_gen;
896
897         /*
898          * Store AEAD IV parameters as cipher IV,
899          * to avoid unnecessary memory usage
900          */
901         session->cipher_iv.offset = xform->aead.iv.offset;
902         session->cipher_iv.length = xform->aead.iv.length;
903
904         session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
905
906         session->is_single_pass = 0;
907         switch (aead_xform->algo) {
908         case RTE_CRYPTO_AEAD_AES_GCM:
909                 if (qat_sym_validate_aes_key(aead_xform->key.length,
910                                 &session->qat_cipher_alg) != 0) {
911                         QAT_LOG(ERR, "Invalid AES key size");
912                         return -EINVAL;
913                 }
914                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
915                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
916                 if (qat_dev_gen == QAT_GEN3 && aead_xform->iv.length ==
917                                 QAT_AES_GCM_SPC_IV_SIZE) {
918                         return qat_sym_session_handle_single_pass(session,
919                                         aead_xform);
920                 }
921                 if (session->cipher_iv.length == 0)
922                         session->cipher_iv.length = AES_GCM_J0_LEN;
923                 if (qat_dev_gen == QAT_GEN4)
924                         session->is_ucs = 1;
925                 break;
926         case RTE_CRYPTO_AEAD_AES_CCM:
927                 if (qat_sym_validate_aes_key(aead_xform->key.length,
928                                 &session->qat_cipher_alg) != 0) {
929                         QAT_LOG(ERR, "Invalid AES key size");
930                         return -EINVAL;
931                 }
932                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
933                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
934                 if (qat_dev_gen == QAT_GEN4)
935                         session->is_ucs = 1;
936                 break;
937         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
938                 if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
939                         return -EINVAL;
940                 session->qat_cipher_alg =
941                                 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
942                 return qat_sym_session_handle_single_pass(session,
943                                                 aead_xform);
944         default:
945                 QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
946                                 aead_xform->algo);
947                 return -EINVAL;
948         }
949
950         if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
951                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
952                         (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
953                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
954                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
955                 /*
956                  * It needs to create cipher desc content first,
957                  * then authentication
958                  */
959                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
960                         RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
961
962                 if (qat_sym_cd_cipher_set(session,
963                                         aead_xform->key.data,
964                                         aead_xform->key.length))
965                         return -EINVAL;
966
967                 if (qat_sym_cd_auth_set(session,
968                                         aead_xform->key.data,
969                                         aead_xform->key.length,
970                                         aead_xform->aad_length,
971                                         aead_xform->digest_length,
972                                         crypto_operation))
973                         return -EINVAL;
974         } else {
975                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
976                 /*
977                  * It needs to create authentication desc content first,
978                  * then cipher
979                  */
980
981                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
982                         RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
983
984                 if (qat_sym_cd_auth_set(session,
985                                         aead_xform->key.data,
986                                         aead_xform->key.length,
987                                         aead_xform->aad_length,
988                                         aead_xform->digest_length,
989                                         crypto_operation))
990                         return -EINVAL;
991
992                 if (qat_sym_cd_cipher_set(session,
993                                         aead_xform->key.data,
994                                         aead_xform->key.length))
995                         return -EINVAL;
996         }
997
998         session->digest_length = aead_xform->digest_length;
999         return 0;
1000 }
1001
1002 unsigned int qat_sym_session_get_private_size(
1003                 struct rte_cryptodev *dev __rte_unused)
1004 {
1005         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
1006 }
1007
1008 /* returns block size in bytes per cipher algo */
1009 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1010 {
1011         switch (qat_cipher_alg) {
1012         case ICP_QAT_HW_CIPHER_ALGO_DES:
1013                 return ICP_QAT_HW_DES_BLK_SZ;
1014         case ICP_QAT_HW_CIPHER_ALGO_3DES:
1015                 return ICP_QAT_HW_3DES_BLK_SZ;
1016         case ICP_QAT_HW_CIPHER_ALGO_AES128:
1017         case ICP_QAT_HW_CIPHER_ALGO_AES192:
1018         case ICP_QAT_HW_CIPHER_ALGO_AES256:
1019                 return ICP_QAT_HW_AES_BLK_SZ;
1020         default:
1021                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1022                 return -EFAULT;
1023         };
1024         return -EFAULT;
1025 }
1026
1027 /*
1028  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1029  * This is digest size rounded up to nearest quadword
1030  */
1031 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1032 {
1033         switch (qat_hash_alg) {
1034         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1035                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1036                                                 QAT_HW_DEFAULT_ALIGNMENT);
1037         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1038                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1039                                                 QAT_HW_DEFAULT_ALIGNMENT);
1040         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1041                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1042                                                 QAT_HW_DEFAULT_ALIGNMENT);
1043         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1044                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1045                                                 QAT_HW_DEFAULT_ALIGNMENT);
1046         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1047                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1048                                                 QAT_HW_DEFAULT_ALIGNMENT);
1049         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1050                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1051                                                 QAT_HW_DEFAULT_ALIGNMENT);
1052         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1053         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1054                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1055                                                 QAT_HW_DEFAULT_ALIGNMENT);
1056         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1057                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1058                                                 QAT_HW_DEFAULT_ALIGNMENT);
1059         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1060                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1061                                                 QAT_HW_DEFAULT_ALIGNMENT);
1062         case ICP_QAT_HW_AUTH_ALGO_MD5:
1063                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1064                                                 QAT_HW_DEFAULT_ALIGNMENT);
1065         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1066                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1067                                                 QAT_HW_DEFAULT_ALIGNMENT);
1068         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1069                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1070                                                 QAT_HW_DEFAULT_ALIGNMENT);
1071         case ICP_QAT_HW_AUTH_ALGO_NULL:
1072                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1073                                                 QAT_HW_DEFAULT_ALIGNMENT);
1074         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1075                 /* return maximum state1 size in this case */
1076                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1077                                                 QAT_HW_DEFAULT_ALIGNMENT);
1078         default:
1079                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1080                 return -EFAULT;
1081         };
1082         return -EFAULT;
1083 }
1084
1085 /* returns digest size in bytes  per hash algo */
1086 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1087 {
1088         switch (qat_hash_alg) {
1089         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1090                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1091         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1092                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1093         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1094                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1095         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1096                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1097         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1098                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1099         case ICP_QAT_HW_AUTH_ALGO_MD5:
1100                 return ICP_QAT_HW_MD5_STATE1_SZ;
1101         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1102                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1103         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1104                 /* return maximum digest size in this case */
1105                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1106         default:
1107                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1108                 return -EFAULT;
1109         };
1110         return -EFAULT;
1111 }
1112
1113 /* returns block size in byes per hash algo */
1114 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1115 {
1116         switch (qat_hash_alg) {
1117         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1118                 return SHA_CBLOCK;
1119         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1120                 return SHA256_CBLOCK;
1121         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1122                 return SHA256_CBLOCK;
1123         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1124                 return SHA512_CBLOCK;
1125         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1126                 return SHA512_CBLOCK;
1127         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1128                 return 16;
1129         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1130                 return ICP_QAT_HW_AES_BLK_SZ;
1131         case ICP_QAT_HW_AUTH_ALGO_MD5:
1132                 return MD5_CBLOCK;
1133         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1134                 /* return maximum block size in this case */
1135                 return SHA512_CBLOCK;
1136         default:
1137                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1138                 return -EFAULT;
1139         };
1140         return -EFAULT;
1141 }
1142
/* Run one SHA-1 compression round over a single input block and copy out
 * the raw (unfinalised) hash state words.
 * Assumes the state words sit at the start of SHA_CTX - TODO confirm for
 * the OpenSSL version in use.
 */
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX sha;

	if (SHA1_Init(&sha) == 0)
		return -EFAULT;
	SHA1_Transform(&sha, data_in);
	rte_memcpy(data_out, &sha, SHA_DIGEST_LENGTH);
	return 0;
}
1153
/* Run one SHA-224 compression round over a single input block and copy out
 * the raw (unfinalised) hash state words.
 */
static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX sha;

	if (SHA224_Init(&sha) == 0)
		return -EFAULT;
	SHA256_Transform(&sha, data_in);
	rte_memcpy(data_out, &sha, SHA256_DIGEST_LENGTH);
	return 0;
}
1164
/* Run one SHA-256 compression round over a single input block and copy out
 * the raw (unfinalised) hash state words.
 */
static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX sha;

	if (SHA256_Init(&sha) == 0)
		return -EFAULT;
	SHA256_Transform(&sha, data_in);
	rte_memcpy(data_out, &sha, SHA256_DIGEST_LENGTH);
	return 0;
}
1175
/* Run one SHA-384 compression round over a single input block and copy out
 * the raw (unfinalised) hash state words.
 */
static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX sha;

	if (SHA384_Init(&sha) == 0)
		return -EFAULT;
	SHA512_Transform(&sha, data_in);
	rte_memcpy(data_out, &sha, SHA512_DIGEST_LENGTH);
	return 0;
}
1186
/* Run one SHA-512 compression round over a single input block and copy out
 * the raw (unfinalised) hash state words.
 */
static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX sha;

	if (SHA512_Init(&sha) == 0)
		return -EFAULT;
	SHA512_Transform(&sha, data_in);
	rte_memcpy(data_out, &sha, SHA512_DIGEST_LENGTH);
	return 0;
}
1197
/* Run one MD5 compression round over a single input block and copy out
 * the raw (unfinalised) hash state words.
 */
static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX md5;

	if (MD5_Init(&md5) == 0)
		return -EFAULT;
	MD5_Transform(&md5, data_in);
	rte_memcpy(data_out, &md5, MD5_DIGEST_LENGTH);

	return 0;
}
1209
/*
 * Compute a partial hash over one input block: run a single compression
 * round for hash_alg on data_in and write the intermediate state to
 * data_out with SHA state words byte-swapped to big-endian (32-bit words
 * for SHA-1/224/256, 64-bit words for SHA-384/512) - presumably the byte
 * order the QAT hardware state buffer requires. MD5 state is written
 * as-is with no swap.
 *
 * Returns 0 on success, -EFAULT on an unsupported algorithm or an
 * underlying OpenSSL failure.
 */
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	/* VLA sized to the largest supported digest (SHA-512) */
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	/* Initialize to avoid gcc warning */
	memset(digest, 0, sizeof(digest));

	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	/* Two aliases of data_out: 32-bit word view for SHA-1/224/256,
	 * 64-bit word view for SHA-384/512.
	 */
	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 writes straight to data_out - no byte swap */
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
1278 #define HMAC_IPAD_VALUE 0x36
1279 #define HMAC_OPAD_VALUE 0x5c
1280 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1281
1282 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1283
/*
 * CMAC subkey derivation: left-shift the 128-bit input by one bit and,
 * if the top bit of the input was set, XOR the Rb reduction constant
 * into the last byte (doubling in GF(2^128), per the CMAC construction).
 */
static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
{
	int byte;

	derived[0] = base[0] << 1;
	for (byte = 1; byte < ICP_QAT_HW_AES_BLK_SZ; byte++) {
		derived[byte] = base[byte] << 1;
		/* Carry the shifted-out top bit into the previous byte */
		derived[byte - 1] |= base[byte] >> 7;
	}

	if (base[0] & 0x80)
		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
}
1297
1298 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1299                                 const uint8_t *auth_key,
1300                                 uint16_t auth_keylen,
1301                                 uint8_t *p_state_buf,
1302                                 uint16_t *p_state_len,
1303                                 uint8_t aes_cmac)
1304 {
1305         int block_size;
1306         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1307         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1308         int i;
1309
1310         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1311
1312                 /* CMAC */
1313                 if (aes_cmac) {
1314                         AES_KEY enc_key;
1315                         uint8_t *in = NULL;
1316                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1317                         uint8_t *k1, *k2;
1318
1319                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1320
1321                         in = rte_zmalloc("AES CMAC K1",
1322                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1323
1324                         if (in == NULL) {
1325                                 QAT_LOG(ERR, "Failed to alloc memory");
1326                                 return -ENOMEM;
1327                         }
1328
1329                         rte_memcpy(in, AES_CMAC_SEED,
1330                                    ICP_QAT_HW_AES_128_KEY_SZ);
1331                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1332
1333                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1334                                 &enc_key) != 0) {
1335                                 rte_free(in);
1336                                 return -EFAULT;
1337                         }
1338
1339                         AES_encrypt(in, k0, &enc_key);
1340
1341                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1342                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1343
1344                         aes_cmac_key_derive(k0, k1);
1345                         aes_cmac_key_derive(k1, k2);
1346
1347                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1348                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1349                         rte_free(in);
1350                         return 0;
1351                 } else {
1352                         static uint8_t qat_aes_xcbc_key_seed[
1353                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1354                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1355                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1356                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1357                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1358                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1359                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1360                         };
1361
1362                         uint8_t *in = NULL;
1363                         uint8_t *out = p_state_buf;
1364                         int x;
1365                         AES_KEY enc_key;
1366
1367                         in = rte_zmalloc("working mem for key",
1368                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1369                         if (in == NULL) {
1370                                 QAT_LOG(ERR, "Failed to alloc memory");
1371                                 return -ENOMEM;
1372                         }
1373
1374                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1375                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1376                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1377                                 if (AES_set_encrypt_key(auth_key,
1378                                                         auth_keylen << 3,
1379                                                         &enc_key) != 0) {
1380                                         rte_free(in -
1381                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1382                                         memset(out -
1383                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1384                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1385                                         return -EFAULT;
1386                                 }
1387                                 AES_encrypt(in, out, &enc_key);
1388                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1389                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1390                         }
1391                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1392                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1393                         return 0;
1394                 }
1395
1396         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1397                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1398                 uint8_t *in = NULL;
1399                 uint8_t *out = p_state_buf;
1400                 AES_KEY enc_key;
1401
1402                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1403                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1404                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1405                 in = rte_zmalloc("working mem for key",
1406                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1407                 if (in == NULL) {
1408                         QAT_LOG(ERR, "Failed to alloc memory");
1409                         return -ENOMEM;
1410                 }
1411
1412                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1413                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1414                         &enc_key) != 0) {
1415                         return -EFAULT;
1416                 }
1417                 AES_encrypt(in, out, &enc_key);
1418                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1419                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1420                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1421                 rte_free(in);
1422                 return 0;
1423         }
1424
1425         block_size = qat_hash_get_block_size(hash_alg);
1426         if (block_size < 0)
1427                 return block_size;
1428         /* init ipad and opad from key and xor with fixed values */
1429         memset(ipad, 0, block_size);
1430         memset(opad, 0, block_size);
1431
1432         if (auth_keylen > (unsigned int)block_size) {
1433                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1434                 return -EFAULT;
1435         }
1436         rte_memcpy(ipad, auth_key, auth_keylen);
1437         rte_memcpy(opad, auth_key, auth_keylen);
1438
1439         for (i = 0; i < block_size; i++) {
1440                 uint8_t *ipad_ptr = ipad + i;
1441                 uint8_t *opad_ptr = opad + i;
1442                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1443                 *opad_ptr ^= HMAC_OPAD_VALUE;
1444         }
1445
1446         /* do partial hash of ipad and copy to state1 */
1447         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1448                 memset(ipad, 0, block_size);
1449                 memset(opad, 0, block_size);
1450                 QAT_LOG(ERR, "ipad precompute failed");
1451                 return -EFAULT;
1452         }
1453
1454         /*
1455          * State len is a multiple of 8, so may be larger than the digest.
1456          * Put the partial hash of opad state_len bytes after state1
1457          */
1458         *p_state_len = qat_hash_get_state1_size(hash_alg);
1459         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1460                 memset(ipad, 0, block_size);
1461                 memset(opad, 0, block_size);
1462                 QAT_LOG(ERR, "opad precompute failed");
1463                 return -EFAULT;
1464         }
1465
1466         /*  don't leave data lying around */
1467         memset(ipad, 0, block_size);
1468         memset(opad, 0, block_size);
1469         return 0;
1470 }
1471
/*
 * Initialise the common firmware request header fields shared by all
 * symmetric-crypto requests for this session: header flags, service type,
 * pointer/CD field types and the LA service-specific protocol flags.
 *
 * @param session      session being built; only read here for is_ucs and,
 *                     in the UCS branch, its embedded request template
 * @param header       common request header to populate
 * @param proto_flags  protocol selection (NONE/CCM/GCM/SNOW3G/ZUC) that is
 *                     translated into the LA proto bits of the header
 */
static void
qat_sym_session_init_common_hdr(struct qat_sym_session *session,
		struct icp_qat_fw_comn_req_hdr *header,
		enum qat_sym_proto_flag proto_flags)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	/* Content descriptor referenced by 64-bit address, flat (non-SGL)
	 * source/destination pointers by default.
	 */
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	/* Full (non-partial) operations only */
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	/* IV is carried as 16-byte contiguous data in the request */
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

	/* Translate the session protocol selection into LA proto bits.
	 * ZUC uses a dedicated flag field, distinct from the generic
	 * PROTO field used by the other protocols.
	 */
	switch (proto_flags)		{
	case QAT_CRYPTO_PROTO_FLAG_NONE:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_CCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_GCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_GCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_SNOW_3G_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_ZUC:
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
			ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	}

	/* No partial-state carry-over between requests */
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);

	if (session->is_ucs) {
		/* NOTE(review): this sets the flag on the session's embedded
		 * request template rather than on 'header'. Current callers
		 * pass &session->fw_req.comn_hdr so both alias the same
		 * struct — confirm before calling with a different header.
		 */
		ICP_QAT_FW_LA_SLICE_TYPE_SET(
				session->fw_req.comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
	}
}
1522
/*
 *	Snow3G and ZUC must never use this function; they set their
 *	protocol flags directly in both the cipher and the auth parts of
 *	the content descriptor building functions.
 */
1528 static enum qat_sym_proto_flag
1529 qat_get_crypto_proto_flag(uint16_t flags)
1530 {
1531         int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
1532         enum qat_sym_proto_flag qat_proto_flag =
1533                         QAT_CRYPTO_PROTO_FLAG_NONE;
1534
1535         switch (proto) {
1536         case ICP_QAT_FW_LA_GCM_PROTO:
1537                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1538                 break;
1539         case ICP_QAT_FW_LA_CCM_PROTO:
1540                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1541                 break;
1542         }
1543
1544         return qat_proto_flag;
1545 }
1546
/*
 * Populate the cipher section of the session's content descriptor and the
 * cipher-related fields of the firmware request template.
 *
 * Writes the cipher config and (possibly transformed/padded) key at
 * cdesc->cd_cur_ptr, advances that pointer past what was written, and
 * fills in the cipher CD control header (offsets/sizes in 8-byte words).
 *
 * @param cdesc         session/content-descriptor being built; qat_cmd,
 *                      qat_mode, qat_dir, qat_cipher_alg and is_ucs must
 *                      already be set
 * @param cipherkey     raw cipher key from the xform
 * @param cipherkeylen  key length in bytes
 * @return 0 on success, -EFAULT if qat_cmd is not a cipher command
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	/* cipher_cd_ctrl and hash_cd_ctrl deliberately alias the same
	 * cd_ctrl memory; which view is valid depends on qat_cmd.
	 */
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	enum qat_sym_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* Cipher-only chain: cipher slice then straight to DRAM,
		 * no auth result returned or compared.
		 */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* Cipher-then-hash chain: cipher -> auth -> DRAM */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* HASH_CIPHER is valid here too: its chain and cd_cur_ptr
		 * were already set up by the auth side of the CD build.
		 */
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		/* Wireless algos always require HW key conversion */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		/* Decrypt needs the HW-derived (converted) key schedule */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizing; cipher_state_sz is expressed in
	 * 8-byte words (hence the >> 3 throughout).
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		/* Preserve any CCM/GCM proto already set in the header */
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		/* Default (AES-family) case: key used as-is */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	}
	/* Offset of the cipher config within the CD, in 8-byte words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_sym_session_init_common_hdr(cdesc, header, qat_proto_flag);

	/* Both views point at cd_cur_ptr; cipher20 is only used on the
	 * UCS path below.
	 */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 stores the key twice: the raw key followed by
		 * a copy XORed with the F8 key modifier.
		 */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		const uint8_t *final_key = cipherkey;

		/* UCS slice: key field padded up to a 16-byte multiple */
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	/* Pad the key field out to the HW-expected size; 3DES short-key
	 * options replicate K1 instead of zero-padding.
	 */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	/* Final CD size and key size, rounded/expressed in 8-byte words */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1716
1717 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1718                                                 const uint8_t *authkey,
1719                                                 uint32_t authkeylen,
1720                                                 uint32_t aad_length,
1721                                                 uint32_t digestsize,
1722                                                 unsigned int operation)
1723 {
1724         struct icp_qat_hw_auth_setup *hash;
1725         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1726         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1727         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1728         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1729         void *ptr = &req_tmpl->cd_ctrl;
1730         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1731         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1732         struct icp_qat_fw_la_auth_req_params *auth_param =
1733                 (struct icp_qat_fw_la_auth_req_params *)
1734                 ((char *)&req_tmpl->serv_specif_rqpars +
1735                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1736         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1737         uint16_t hash_offset, cd_size;
1738         uint32_t *aad_len = NULL;
1739         uint32_t wordIndex  = 0;
1740         uint32_t *pTempKey;
1741         enum qat_sym_proto_flag qat_proto_flag =
1742                 QAT_CRYPTO_PROTO_FLAG_NONE;
1743
1744         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1745                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1746                                         ICP_QAT_FW_SLICE_AUTH);
1747                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1748                                         ICP_QAT_FW_SLICE_DRAM_WR);
1749                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1750         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1751                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1752                                 ICP_QAT_FW_SLICE_AUTH);
1753                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1754                                 ICP_QAT_FW_SLICE_CIPHER);
1755                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1756                                 ICP_QAT_FW_SLICE_CIPHER);
1757                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1758                                 ICP_QAT_FW_SLICE_DRAM_WR);
1759                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1760         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1761                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1762                 return -EFAULT;
1763         }
1764
1765         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1766                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1767                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1768                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1769                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
1770                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1771         } else {
1772                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1773                                            ICP_QAT_FW_LA_RET_AUTH_RES);
1774                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1775                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1776                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1777         }
1778
1779         /*
1780          * Setup the inner hash config
1781          */
1782         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1783         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1784         hash->auth_config.reserved = 0;
1785         hash->auth_config.config =
1786                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1787                                 cdesc->qat_hash_alg, digestsize);
1788
1789         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1790                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1791                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1792                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1793                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1794                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1795                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1796                         )
1797                 hash->auth_counter.counter = 0;
1798         else {
1799                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1800
1801                 if (block_size < 0)
1802                         return block_size;
1803                 hash->auth_counter.counter = rte_bswap32(block_size);
1804         }
1805
1806         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1807
1808         /*
1809          * cd_cur_ptr now points at the state1 information.
1810          */
1811         switch (cdesc->qat_hash_alg) {
1812         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1813                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1814                         /* Plain SHA-1 */
1815                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1816                                         sizeof(sha1InitialState));
1817                         state1_size = qat_hash_get_state1_size(
1818                                         cdesc->qat_hash_alg);
1819                         break;
1820                 }
1821                 /* SHA-1 HMAC */
1822                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1823                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1824                         cdesc->aes_cmac)) {
1825                         QAT_LOG(ERR, "(SHA)precompute failed");
1826                         return -EFAULT;
1827                 }
1828                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1829                 break;
1830         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1831                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1832                         /* Plain SHA-224 */
1833                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1834                                         sizeof(sha224InitialState));
1835                         state1_size = qat_hash_get_state1_size(
1836                                         cdesc->qat_hash_alg);
1837                         break;
1838                 }
1839                 /* SHA-224 HMAC */
1840                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1841                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1842                         cdesc->aes_cmac)) {
1843                         QAT_LOG(ERR, "(SHA)precompute failed");
1844                         return -EFAULT;
1845                 }
1846                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1847                 break;
1848         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1849                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1850                         /* Plain SHA-256 */
1851                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1852                                         sizeof(sha256InitialState));
1853                         state1_size = qat_hash_get_state1_size(
1854                                         cdesc->qat_hash_alg);
1855                         break;
1856                 }
1857                 /* SHA-256 HMAC */
1858                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1859                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1860                         cdesc->aes_cmac)) {
1861                         QAT_LOG(ERR, "(SHA)precompute failed");
1862                         return -EFAULT;
1863                 }
1864                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1865                 break;
1866         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1867                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1868                         /* Plain SHA-384 */
1869                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1870                                         sizeof(sha384InitialState));
1871                         state1_size = qat_hash_get_state1_size(
1872                                         cdesc->qat_hash_alg);
1873                         break;
1874                 }
1875                 /* SHA-384 HMAC */
1876                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1877                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1878                         cdesc->aes_cmac)) {
1879                         QAT_LOG(ERR, "(SHA)precompute failed");
1880                         return -EFAULT;
1881                 }
1882                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1883                 break;
1884         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1885                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1886                         /* Plain SHA-512 */
1887                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1888                                         sizeof(sha512InitialState));
1889                         state1_size = qat_hash_get_state1_size(
1890                                         cdesc->qat_hash_alg);
1891                         break;
1892                 }
1893                 /* SHA-512 HMAC */
1894                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1895                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1896                         cdesc->aes_cmac)) {
1897                         QAT_LOG(ERR, "(SHA)precompute failed");
1898                         return -EFAULT;
1899                 }
1900                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1901                 break;
1902         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1903                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1904
1905                 if (cdesc->aes_cmac)
1906                         memset(cdesc->cd_cur_ptr, 0, state1_size);
1907                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1908                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1909                         &state2_size, cdesc->aes_cmac)) {
1910                         cdesc->aes_cmac ? QAT_LOG(ERR,
1911                                                   "(CMAC)precompute failed")
1912                                         : QAT_LOG(ERR,
1913                                                   "(XCBC)precompute failed");
1914                         return -EFAULT;
1915                 }
1916                 break;
1917         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1918         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1919                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1920                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1921                 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1922                         authkeylen, cdesc->cd_cur_ptr + state1_size,
1923                         &state2_size, cdesc->aes_cmac)) {
1924                         QAT_LOG(ERR, "(GCM)precompute failed");
1925                         return -EFAULT;
1926                 }
1927                 /*
1928                  * Write (the length of AAD) into bytes 16-19 of state2
1929                  * in big-endian format. This field is 8 bytes
1930                  */
1931                 auth_param->u2.aad_sz =
1932                                 RTE_ALIGN_CEIL(aad_length, 16);
1933                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1934
1935                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1936                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1937                                         ICP_QAT_HW_GALOIS_H_SZ);
1938                 *aad_len = rte_bswap32(aad_length);
1939                 cdesc->aad_len = aad_length;
1940                 break;
1941         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1942                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1943                 state1_size = qat_hash_get_state1_size(
1944                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1945                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1946                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1947
1948                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1949                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
1950                 cipherconfig->cipher_config.val =
1951                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1952                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1953                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
1954                         ICP_QAT_HW_CIPHER_ENCRYPT);
1955                 memcpy(cipherconfig->key, authkey, authkeylen);
1956                 memset(cipherconfig->key + authkeylen,
1957                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1958                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1959                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1960                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1961                 break;
1962         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1963                 hash->auth_config.config =
1964                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1965                                 cdesc->qat_hash_alg, digestsize);
1966                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1967                 state1_size = qat_hash_get_state1_size(
1968                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1969                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1970                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1971                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1972
1973                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1974                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1975                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1976                 cdesc->min_qat_dev_gen = QAT_GEN2;
1977
1978                 break;
1979         case ICP_QAT_HW_AUTH_ALGO_MD5:
1980                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
1981                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1982                         cdesc->aes_cmac)) {
1983                         QAT_LOG(ERR, "(MD5)precompute failed");
1984                         return -EFAULT;
1985                 }
1986                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
1987                 break;
1988         case ICP_QAT_HW_AUTH_ALGO_NULL:
1989                 state1_size = qat_hash_get_state1_size(
1990                                 ICP_QAT_HW_AUTH_ALGO_NULL);
1991                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
1992                 break;
1993         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1994                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1995                 state1_size = qat_hash_get_state1_size(
1996                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
1997                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
1998                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
1999
2000                 if (aad_length > 0) {
2001                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2002                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
2003                         auth_param->u2.aad_sz =
2004                         RTE_ALIGN_CEIL(aad_length,
2005                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2006                 } else {
2007                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2008                 }
2009                 cdesc->aad_len = aad_length;
2010                 hash->auth_counter.counter = 0;
2011
2012                 hash_cd_ctrl->outer_prefix_sz = digestsize;
2013                 auth_param->hash_state_sz = digestsize;
2014
2015                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2016                 break;
2017         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2018                 state1_size = qat_hash_get_state1_size(
2019                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2020                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2021                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2022                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2023                                                         + authkeylen);
2024                 /*
2025                 * The Inner Hash Initial State2 block must contain IK
2026                 * (Initialisation Key), followed by IK XOR-ed with KM
2027                 * (Key Modifier): IK||(IK^KM).
2028                 */
2029                 /* write the auth key */
2030                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2031                 /* initialise temp key with auth key */
2032                 memcpy(pTempKey, authkey, authkeylen);
2033                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2034                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2035                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2036                 break;
2037         default:
2038                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2039                 return -EFAULT;
2040         }
2041
2042         /* Request template setup */
2043         qat_sym_session_init_common_hdr(cdesc, header, qat_proto_flag);
2044         header->service_cmd_id = cdesc->qat_cmd;
2045
2046         /* Auth CD config setup */
2047         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2048         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2049         hash_cd_ctrl->inner_res_sz = digestsize;
2050         hash_cd_ctrl->final_sz = digestsize;
2051         hash_cd_ctrl->inner_state1_sz = state1_size;
2052         auth_param->auth_res_sz = digestsize;
2053
2054         hash_cd_ctrl->inner_state2_sz  = state2_size;
2055         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2056                         ((sizeof(struct icp_qat_hw_auth_setup) +
2057                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2058                                         >> 3);
2059
2060         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2061         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2062
2063         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2064         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2065
2066         return 0;
2067 }
2068
2069 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2070 {
2071         switch (key_len) {
2072         case ICP_QAT_HW_AES_128_KEY_SZ:
2073                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2074                 break;
2075         case ICP_QAT_HW_AES_192_KEY_SZ:
2076                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2077                 break;
2078         case ICP_QAT_HW_AES_256_KEY_SZ:
2079                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2080                 break;
2081         default:
2082                 return -EINVAL;
2083         }
2084         return 0;
2085 }
2086
2087 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2088                 enum icp_qat_hw_cipher_algo *alg)
2089 {
2090         switch (key_len) {
2091         case ICP_QAT_HW_AES_128_KEY_SZ:
2092                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2093                 break;
2094         case ICP_QAT_HW_AES_256_KEY_SZ:
2095                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2096                 break;
2097         default:
2098                 return -EINVAL;
2099         }
2100         return 0;
2101 }
2102
2103 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2104 {
2105         switch (key_len) {
2106         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2107                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2108                 break;
2109         default:
2110                 return -EINVAL;
2111         }
2112         return 0;
2113 }
2114
2115 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2116 {
2117         switch (key_len) {
2118         case ICP_QAT_HW_KASUMI_KEY_SZ:
2119                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2120                 break;
2121         default:
2122                 return -EINVAL;
2123         }
2124         return 0;
2125 }
2126
2127 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2128 {
2129         switch (key_len) {
2130         case ICP_QAT_HW_DES_KEY_SZ:
2131                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2132                 break;
2133         default:
2134                 return -EINVAL;
2135         }
2136         return 0;
2137 }
2138
2139 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2140 {
2141         switch (key_len) {
2142         case QAT_3DES_KEY_SZ_OPT1:
2143         case QAT_3DES_KEY_SZ_OPT2:
2144         case QAT_3DES_KEY_SZ_OPT3:
2145                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2146                 break;
2147         default:
2148                 return -EINVAL;
2149         }
2150         return 0;
2151 }
2152
2153 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2154 {
2155         switch (key_len) {
2156         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2157                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2158                 break;
2159         default:
2160                 return -EINVAL;
2161         }
2162         return 0;
2163 }
2164
2165 #ifdef RTE_LIB_SECURITY
2166 static int
2167 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2168 {
2169         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2170         struct rte_security_docsis_xform *docsis = &conf->docsis;
2171
2172         /* CRC generate -> Cipher encrypt */
2173         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2174
2175                 if (crypto_sym != NULL &&
2176                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2177                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2178                     crypto_sym->cipher.algo ==
2179                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2180                     (crypto_sym->cipher.key.length ==
2181                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2182                      crypto_sym->cipher.key.length ==
2183                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2184                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2185                     crypto_sym->next == NULL) {
2186                         return 0;
2187                 }
2188         /* Cipher decrypt -> CRC verify */
2189         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2190
2191                 if (crypto_sym != NULL &&
2192                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2193                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2194                     crypto_sym->cipher.algo ==
2195                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2196                     (crypto_sym->cipher.key.length ==
2197                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2198                      crypto_sym->cipher.key.length ==
2199                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2200                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2201                     crypto_sym->next == NULL) {
2202                         return 0;
2203                 }
2204         }
2205
2206         return -EINVAL;
2207 }
2208
2209 static int
2210 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2211                 struct rte_security_session_conf *conf, void *session_private)
2212 {
2213         int ret;
2214         int qat_cmd_id;
2215         struct rte_crypto_sym_xform *xform = NULL;
2216         struct qat_sym_session *session = session_private;
2217
2218         /* Clear the session */
2219         memset(session, 0, qat_sym_session_get_private_size(dev));
2220
2221         ret = qat_sec_session_check_docsis(conf);
2222         if (ret) {
2223                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2224                 return ret;
2225         }
2226
2227         xform = conf->crypto_xform;
2228
2229         /* Verify the session physical address is known */
2230         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2231         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2232                 QAT_LOG(ERR,
2233                         "Session physical address unknown. Bad memory pool.");
2234                 return -EINVAL;
2235         }
2236
2237         /* Set context descriptor physical address */
2238         session->cd_paddr = session_paddr +
2239                         offsetof(struct qat_sym_session, cd);
2240
2241         session->min_qat_dev_gen = QAT_GEN1;
2242
2243         /* Get requested QAT command id - should be cipher */
2244         qat_cmd_id = qat_get_cmd_id(xform);
2245         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2246                 QAT_LOG(ERR, "Unsupported xform chain requested");
2247                 return -ENOTSUP;
2248         }
2249         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2250
2251         ret = qat_sym_session_configure_cipher(dev, xform, session);
2252         if (ret < 0)
2253                 return ret;
2254
2255         return 0;
2256 }
2257
2258 int
2259 qat_security_session_create(void *dev,
2260                                 struct rte_security_session_conf *conf,
2261                                 struct rte_security_session *sess,
2262                                 struct rte_mempool *mempool)
2263 {
2264         void *sess_private_data;
2265         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2266         int ret;
2267
2268         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2269                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2270                 QAT_LOG(ERR, "Invalid security protocol");
2271                 return -EINVAL;
2272         }
2273
2274         if (rte_mempool_get(mempool, &sess_private_data)) {
2275                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2276                 return -ENOMEM;
2277         }
2278
2279         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2280                         sess_private_data);
2281         if (ret != 0) {
2282                 QAT_LOG(ERR, "Failed to configure session parameters");
2283                 /* Return session to mempool */
2284                 rte_mempool_put(mempool, sess_private_data);
2285                 return ret;
2286         }
2287
2288         set_sec_session_private_data(sess, sess_private_data);
2289
2290         return ret;
2291 }
2292
2293 int
2294 qat_security_session_destroy(void *dev __rte_unused,
2295                                  struct rte_security_session *sess)
2296 {
2297         void *sess_priv = get_sec_session_private_data(sess);
2298         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2299
2300         if (sess_priv) {
2301                 if (s->bpi_ctx)
2302                         bpi_cipher_ctx_free(s->bpi_ctx);
2303                 memset(s, 0, qat_sym_session_get_private_size(dev));
2304                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2305
2306                 set_sec_session_private_data(sess, NULL);
2307                 rte_mempool_put(sess_mp, sess_priv);
2308         }
2309         return 0;
2310 }
2311 #endif