crypto/ipsec_mb: fix queue cleanup null pointer dereference
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2019 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
24
/* Precomputed initial hash states (H0..Hn) used when seeding the QAT
 * hardware digest state for plain (MODE0) SHA operations. Byte values
 * are the big-endian dump of the initial words.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
59
/* Forward declarations of content-descriptor/header setup helpers
 * defined later in this file (bodies not visible in this chunk).
 */
static int
qat_sym_cd_cipher_set(struct qat_sym_session *cd,
						const uint8_t *enckey,
						uint32_t enckeylen);

static int
qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation);
static void
qat_sym_session_init_common_hdr(struct qat_sym_session *session);
74
75 /* Req/cd init functions */
76
/* Final step of session setup: populate the common firmware request
 * header once all cipher/auth parameters have been written into the
 * session.
 */
static void
qat_sym_session_finalize(struct qat_sym_session *session)
{
	qat_sym_session_init_common_hdr(session);
}
82
83 /** Frees a context previously created
84  *  Depends on openssl libcrypto
85  */
86 static void
87 bpi_cipher_ctx_free(void *bpi_ctx)
88 {
89         if (bpi_ctx != NULL)
90                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
91 }
92
93 /** Creates a context in either AES or DES in ECB mode
94  *  Depends on openssl libcrypto
95  */
96 static int
97 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
98                 enum rte_crypto_cipher_operation direction __rte_unused,
99                 const uint8_t *key, uint16_t key_length, void **ctx)
100 {
101         const EVP_CIPHER *algo = NULL;
102         int ret;
103         *ctx = EVP_CIPHER_CTX_new();
104
105         if (*ctx == NULL) {
106                 ret = -ENOMEM;
107                 goto ctx_init_err;
108         }
109
110         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
111                 algo = EVP_des_ecb();
112         else
113                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
114                         algo = EVP_aes_128_ecb();
115                 else
116                         algo = EVP_aes_256_ecb();
117
118         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
119         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
120                 ret = -EINVAL;
121                 goto ctx_init_err;
122         }
123
124         return 0;
125
126 ctx_init_err:
127         if (*ctx != NULL)
128                 EVP_CIPHER_CTX_free(*ctx);
129         return ret;
130 }
131
132 static int
133 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
134                 struct qat_cryptodev_private *internals)
135 {
136         int i = 0;
137         const struct rte_cryptodev_capabilities *capability;
138
139         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
140                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
141                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
142                         continue;
143
144                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
145                         continue;
146
147                 if (capability->sym.cipher.algo == algo)
148                         return 1;
149         }
150         return 0;
151 }
152
153 static int
154 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
155                 struct qat_cryptodev_private *internals)
156 {
157         int i = 0;
158         const struct rte_cryptodev_capabilities *capability;
159
160         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
161                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
162                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
163                         continue;
164
165                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
166                         continue;
167
168                 if (capability->sym.auth.algo == algo)
169                         return 1;
170         }
171         return 0;
172 }
173
174 void
175 qat_sym_session_clear(struct rte_cryptodev *dev,
176                 struct rte_cryptodev_sym_session *sess)
177 {
178         uint8_t index = dev->driver_id;
179         void *sess_priv = get_sym_session_private_data(sess, index);
180         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
181
182         if (sess_priv) {
183                 if (s->bpi_ctx)
184                         bpi_cipher_ctx_free(s->bpi_ctx);
185                 memset(s, 0, qat_sym_session_get_private_size(dev));
186                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
187
188                 set_sym_session_private_data(sess, index, NULL);
189                 rte_mempool_put(sess_mp, sess_priv);
190         }
191 }
192
193 static int
194 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
195 {
196         /* Cipher Only */
197         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
198                 return ICP_QAT_FW_LA_CMD_CIPHER;
199
200         /* Authentication Only */
201         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
202                 return ICP_QAT_FW_LA_CMD_AUTH;
203
204         /* AEAD */
205         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
206                 /* AES-GCM and AES-CCM works with different direction
207                  * GCM first encrypts and generate hash where AES-CCM
208                  * first generate hash and encrypts. Similar relation
209                  * applies to decryption.
210                  */
211                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
212                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
213                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
214                         else
215                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
216                 else
217                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
218                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
219                         else
220                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
221         }
222
223         if (xform->next == NULL)
224                 return -1;
225
226         /* Cipher then Authenticate */
227         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
228                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
229                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
230
231         /* Authenticate then Cipher */
232         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
233                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
235
236         return -1;
237 }
238
239 static struct rte_crypto_auth_xform *
240 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
241 {
242         do {
243                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
244                         return &xform->auth;
245
246                 xform = xform->next;
247         } while (xform);
248
249         return NULL;
250 }
251
252 static struct rte_crypto_cipher_xform *
253 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
254 {
255         do {
256                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
257                         return &xform->cipher;
258
259                 xform = xform->next;
260         } while (xform);
261
262         return NULL;
263 }
264
265 int
266 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
267                 struct rte_crypto_sym_xform *xform,
268                 struct qat_sym_session *session)
269 {
270         struct qat_cryptodev_private *internals = dev->data->dev_private;
271         struct rte_crypto_cipher_xform *cipher_xform = NULL;
272         enum qat_device_gen qat_dev_gen =
273                                 internals->qat_dev->qat_dev_gen;
274         int ret;
275
276         /* Get cipher xform from crypto xform chain */
277         cipher_xform = qat_get_cipher_xform(xform);
278
279         session->cipher_iv.offset = cipher_xform->iv.offset;
280         session->cipher_iv.length = cipher_xform->iv.length;
281
282         switch (cipher_xform->algo) {
283         case RTE_CRYPTO_CIPHER_AES_CBC:
284                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
285                                 &session->qat_cipher_alg) != 0) {
286                         QAT_LOG(ERR, "Invalid AES cipher key size");
287                         ret = -EINVAL;
288                         goto error_out;
289                 }
290                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
291                 break;
292         case RTE_CRYPTO_CIPHER_AES_CTR:
293                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
294                                 &session->qat_cipher_alg) != 0) {
295                         QAT_LOG(ERR, "Invalid AES cipher key size");
296                         ret = -EINVAL;
297                         goto error_out;
298                 }
299                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
300                 if (qat_dev_gen == QAT_GEN4)
301                         session->is_ucs = 1;
302                 break;
303         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
304                 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
305                                         &session->qat_cipher_alg) != 0) {
306                         QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
307                         ret = -EINVAL;
308                         goto error_out;
309                 }
310                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
311                 break;
312         case RTE_CRYPTO_CIPHER_NULL:
313                 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
314                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
315                 break;
316         case RTE_CRYPTO_CIPHER_KASUMI_F8:
317                 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
318                                         &session->qat_cipher_alg) != 0) {
319                         QAT_LOG(ERR, "Invalid KASUMI cipher key size");
320                         ret = -EINVAL;
321                         goto error_out;
322                 }
323                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
324                 break;
325         case RTE_CRYPTO_CIPHER_3DES_CBC:
326                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
327                                 &session->qat_cipher_alg) != 0) {
328                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
329                         ret = -EINVAL;
330                         goto error_out;
331                 }
332                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
333                 break;
334         case RTE_CRYPTO_CIPHER_DES_CBC:
335                 if (qat_sym_validate_des_key(cipher_xform->key.length,
336                                 &session->qat_cipher_alg) != 0) {
337                         QAT_LOG(ERR, "Invalid DES cipher key size");
338                         ret = -EINVAL;
339                         goto error_out;
340                 }
341                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
342                 break;
343         case RTE_CRYPTO_CIPHER_3DES_CTR:
344                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
345                                 &session->qat_cipher_alg) != 0) {
346                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
347                         ret = -EINVAL;
348                         goto error_out;
349                 }
350                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
351                 break;
352         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
353                 ret = bpi_cipher_ctx_init(
354                                         cipher_xform->algo,
355                                         cipher_xform->op,
356                                         cipher_xform->key.data,
357                                         cipher_xform->key.length,
358                                         &session->bpi_ctx);
359                 if (ret != 0) {
360                         QAT_LOG(ERR, "failed to create DES BPI ctx");
361                         goto error_out;
362                 }
363                 if (qat_sym_validate_des_key(cipher_xform->key.length,
364                                 &session->qat_cipher_alg) != 0) {
365                         QAT_LOG(ERR, "Invalid DES cipher key size");
366                         ret = -EINVAL;
367                         goto error_out;
368                 }
369                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
370                 break;
371         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
372                 ret = bpi_cipher_ctx_init(
373                                         cipher_xform->algo,
374                                         cipher_xform->op,
375                                         cipher_xform->key.data,
376                                         cipher_xform->key.length,
377                                         &session->bpi_ctx);
378                 if (ret != 0) {
379                         QAT_LOG(ERR, "failed to create AES BPI ctx");
380                         goto error_out;
381                 }
382                 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
383                                 &session->qat_cipher_alg) != 0) {
384                         QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
385                         ret = -EINVAL;
386                         goto error_out;
387                 }
388                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
389                 break;
390         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
391                 if (!qat_is_cipher_alg_supported(
392                         cipher_xform->algo, internals)) {
393                         QAT_LOG(ERR, "%s not supported on this device",
394                                 rte_crypto_cipher_algorithm_strings
395                                         [cipher_xform->algo]);
396                         ret = -ENOTSUP;
397                         goto error_out;
398                 }
399                 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
400                                 &session->qat_cipher_alg) != 0) {
401                         QAT_LOG(ERR, "Invalid ZUC cipher key size");
402                         ret = -EINVAL;
403                         goto error_out;
404                 }
405                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
406                 break;
407         case RTE_CRYPTO_CIPHER_AES_XTS:
408                 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
409                         QAT_LOG(ERR, "AES-XTS-192 not supported");
410                         ret = -EINVAL;
411                         goto error_out;
412                 }
413                 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
414                                 &session->qat_cipher_alg) != 0) {
415                         QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
416                         ret = -EINVAL;
417                         goto error_out;
418                 }
419                 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
420                 break;
421         case RTE_CRYPTO_CIPHER_3DES_ECB:
422         case RTE_CRYPTO_CIPHER_AES_ECB:
423         case RTE_CRYPTO_CIPHER_AES_F8:
424         case RTE_CRYPTO_CIPHER_ARC4:
425                 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
426                                 cipher_xform->algo);
427                 ret = -ENOTSUP;
428                 goto error_out;
429         default:
430                 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
431                                 cipher_xform->algo);
432                 ret = -EINVAL;
433                 goto error_out;
434         }
435
436         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
437                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
438         else
439                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
440
441         if (qat_sym_cd_cipher_set(session,
442                                                 cipher_xform->key.data,
443                                                 cipher_xform->key.length)) {
444                 ret = -EINVAL;
445                 goto error_out;
446         }
447
448         return 0;
449
450 error_out:
451         if (session->bpi_ctx) {
452                 bpi_cipher_ctx_free(session->bpi_ctx);
453                 session->bpi_ctx = NULL;
454         }
455         return ret;
456 }
457
458 int
459 qat_sym_session_configure(struct rte_cryptodev *dev,
460                 struct rte_crypto_sym_xform *xform,
461                 struct rte_cryptodev_sym_session *sess,
462                 struct rte_mempool *mempool)
463 {
464         void *sess_private_data;
465         int ret;
466
467         if (rte_mempool_get(mempool, &sess_private_data)) {
468                 CDEV_LOG_ERR(
469                         "Couldn't get object from session mempool");
470                 return -ENOMEM;
471         }
472
473         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
474         if (ret != 0) {
475                 QAT_LOG(ERR,
476                     "Crypto QAT PMD: failed to configure session parameters");
477
478                 /* Return session to mempool */
479                 rte_mempool_put(mempool, sess_private_data);
480                 return ret;
481         }
482
483         set_sym_session_private_data(sess, dev->driver_id,
484                 sess_private_data);
485
486         return 0;
487 }
488
/* Enable the extended-protocol hash flags in the firmware request
 * template for "mixed" wireless sessions. Sets the extended-flags bit
 * in LW 1, ORs @hash_flag into the hash flags in LW 28, and adjusts the
 * SNOW 3G / ZUC protocol bits to match the session's cipher algorithm.
 */
static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	/* The cipher+auth control header lives in the content descriptor
	 * control longwords of the prebuilt request.
	 */
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		/* SNOW 3G cipher: select the SNOW 3G proto, clear ZUC bit */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		/* ZUC cipher: no LA proto, set the dedicated ZUC proto bit */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		/* Non-wireless cipher: clear both protocol selections */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}
530
/* Special handling for "mixed" sessions that pair a wireless hash
 * (SNOW 3G UIA2 / ZUC EIA3) with a non-matching cipher, or a wireless
 * cipher with AES-CMAC / NULL auth. Such sessions need the extended
 * hash flags and a raised minimum device generation: Gen2 when the
 * device advertises QAT_SYM_CAP_MIXED_CRYPTO, Gen3 otherwise.
 */
static void
qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
		struct qat_sym_session *session)
{
	const struct qat_cryptodev_private *qat_private =
			dev->data->dev_private;
	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;

	/* ZUC hash with a non-ZUC cipher */
	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
	/* SNOW 3G hash with a non-SNOW 3G cipher */
	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
	/* AES-CMAC or NULL hash with a wireless cipher: extended flags
	 * needed, but no wireless hash flag to set.
	 */
	} else if ((session->aes_cmac ||
			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
			(session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session, 0);
	}
}
562
/* Fill a zeroed session private area from the xform chain: resolve the
 * firmware command id, dispatch to the cipher/auth/AEAD configuration
 * helpers, then finalize the common request header and apply mixed-
 * algorithm handling if a cipher+hash chain was configured.
 *
 * Returns 0 on success, -EINVAL for a bad mempool object or parameters,
 * -ENOTSUP for an unsupported xform chain or service.
 */
int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_sym_session *session = session_private;
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
	int ret;
	int qat_cmd_id;
	int handle_mixed = 0;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR,
			"Session physical address unknown. Bad memory pool.");
		return -EINVAL;
	}

	memset(session, 0, sizeof(*session));
	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	/* Defaults; the configure helpers below may raise/override these */
	session->min_qat_dev_gen = QAT_GEN1;
	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
	session->is_ucs = 0;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		if (ret < 0)
			return ret;
		/* GMAC with a GCM-sized IV runs single-pass on Gen3 */
		session->is_single_pass_gmac =
			       qat_dev_gen == QAT_GEN3 &&
			       xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
			       xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		/* Either an AEAD xform, or a cipher+auth chain */
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			handle_mixed = 1;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		/* Either an AEAD xform, or an auth+cipher chain */
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			handle_mixed = 1;
		}
		break;
	/* Remaining firmware services are not exposed by this PMD */
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	default:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	}
	qat_sym_session_finalize(session);
	if (handle_mixed) {
		/* Special handling of mixed hash+cipher algorithms */
		qat_sym_session_handle_mixed(dev, session);
	}

	return 0;
}
674
/* Configure an AEAD session for QAT single-pass operation (Gen3+):
 * the firmware command becomes plain CIPHER with auth folded in.
 * Copies IV/AAD/digest parameters from the AEAD xform and sets the
 * direction. Always returns 0.
 */
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
		const struct rte_crypto_aead_xform *aead_xform)
{
	session->is_single_pass = 1;
	session->is_auth = 1;
	session->min_qat_dev_gen = QAT_GEN3;
	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
	} else {
		/* Chacha-Poly is special case that use QAT CTR mode */
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	}
	session->cipher_iv.offset = aead_xform->iv.offset;
	session->cipher_iv.length = aead_xform->iv.length;
	session->aad_len = aead_xform->aad_length;
	session->digest_length = aead_xform->digest_length;

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	}

	return 0;
}
704
/*
 * Set up a QAT session for an authentication-only xform.
 *
 * Maps the rte_crypto auth algorithm onto the QAT hardware hash algo and
 * then builds the hardware content descriptor(s) from the key.  Plain
 * (unkeyed) SHA-1/224/256/384/512 run in hash MODE0; all other algorithms
 * keep the keyed MODE1 default assigned below.
 *
 * AES-GMAC is implemented as a cipher+hash chain, so it builds both a
 * cipher and an auth descriptor; the descriptor creation order depends on
 * the operation direction (generate: cipher first, verify: auth first).
 *
 * Returns 0 on success, -EINVAL on invalid parameters or descriptor setup
 * failure, -ENOTSUP for algorithms this device cannot support.
 */
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	/* Keyed mode by default; plain hashes override to MODE0 below */
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		/* CMAC reuses the XCBC hardware path; aes_cmac flags the
		 * key-precompute to use CMAC subkey derivation instead.
		 */
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/* Zero IV length: fall back to firmware-computed J0 block */
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
		else
			session->is_iv12B = 1;
		if (qat_dev_gen == QAT_GEN4) {
			session->is_cnt_zero = 1;
			session->is_ucs = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		/* ZUC support varies by device capabilities, check first */
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		/* Known algorithms that the hardware does not implement */
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		session->is_gmac = 1;
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */
			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_cd_auth_set(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */

			if (qat_sym_cd_auth_set(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
	} else {
		/* Auth-only: a single auth content descriptor suffices */
		if (qat_sym_cd_auth_set(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	return 0;
}
871
/*
 * Set up a QAT session for an AEAD xform (AES-GCM, AES-CCM,
 * Chacha20-Poly1305).
 *
 * When single-pass AEAD is available (GEN3+ hardware with a 12-byte IV,
 * or Chacha-Poly) only a cipher content descriptor is needed; otherwise
 * the session is built as a cipher+hash chain whose descriptor creation
 * order depends on algorithm and direction.
 *
 * Returns 0 on success, -EINVAL on bad key size/algorithm or descriptor
 * setup failure.
 */
int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_cryptodev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = aead_xform->digest_length;

	/* May be flipped on by qat_sym_session_handle_single_pass() below */
	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;

		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		/* Zero IV length: fall back to firmware-computed J0 block */
		if (session->cipher_iv.length == 0) {
			session->cipher_iv.length = AES_GCM_J0_LEN;
			break;
		}
		session->is_iv12B = 1;
		/* Single-pass GCM requires GEN3 or newer hardware */
		if (qat_dev_gen < QAT_GEN3)
			break;
		qat_sym_session_handle_single_pass(session,
				aead_xform);
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		/* Chacha-Poly only supports the one fixed key size */
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
			return -EINVAL;
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		qat_sym_session_handle_single_pass(session,
						aead_xform);
		break;
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	if (session->is_single_pass) {
		/* Single pass: only the cipher descriptor is required */
		if (qat_sym_cd_cipher_set(session,
				aead_xform->key.data, aead_xform->key.length))
			return -EINVAL;
	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * It needs to create cipher desc content first,
		 * then authentication
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * It needs to create authentication desc content first,
		 * then cipher
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	return 0;
}
999
/* Report the per-session private data size, padded to an 8-byte multiple. */
unsigned int qat_sym_session_get_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
}
1005
1006 /* returns block size in bytes per cipher algo */
1007 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1008 {
1009         switch (qat_cipher_alg) {
1010         case ICP_QAT_HW_CIPHER_ALGO_DES:
1011                 return ICP_QAT_HW_DES_BLK_SZ;
1012         case ICP_QAT_HW_CIPHER_ALGO_3DES:
1013                 return ICP_QAT_HW_3DES_BLK_SZ;
1014         case ICP_QAT_HW_CIPHER_ALGO_AES128:
1015         case ICP_QAT_HW_CIPHER_ALGO_AES192:
1016         case ICP_QAT_HW_CIPHER_ALGO_AES256:
1017                 return ICP_QAT_HW_AES_BLK_SZ;
1018         default:
1019                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1020                 return -EFAULT;
1021         };
1022         return -EFAULT;
1023 }
1024
1025 /*
1026  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1027  * This is digest size rounded up to nearest quadword
1028  */
1029 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1030 {
1031         switch (qat_hash_alg) {
1032         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1033                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1034                                                 QAT_HW_DEFAULT_ALIGNMENT);
1035         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1036                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1037                                                 QAT_HW_DEFAULT_ALIGNMENT);
1038         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1039                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1040                                                 QAT_HW_DEFAULT_ALIGNMENT);
1041         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1042                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1043                                                 QAT_HW_DEFAULT_ALIGNMENT);
1044         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1045                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1046                                                 QAT_HW_DEFAULT_ALIGNMENT);
1047         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1048                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1049                                                 QAT_HW_DEFAULT_ALIGNMENT);
1050         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1051         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1052                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1053                                                 QAT_HW_DEFAULT_ALIGNMENT);
1054         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1055                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1056                                                 QAT_HW_DEFAULT_ALIGNMENT);
1057         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1058                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1059                                                 QAT_HW_DEFAULT_ALIGNMENT);
1060         case ICP_QAT_HW_AUTH_ALGO_MD5:
1061                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1062                                                 QAT_HW_DEFAULT_ALIGNMENT);
1063         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1064                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1065                                                 QAT_HW_DEFAULT_ALIGNMENT);
1066         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1067                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1068                                                 QAT_HW_DEFAULT_ALIGNMENT);
1069         case ICP_QAT_HW_AUTH_ALGO_NULL:
1070                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1071                                                 QAT_HW_DEFAULT_ALIGNMENT);
1072         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1073                 /* return maximum state1 size in this case */
1074                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1075                                                 QAT_HW_DEFAULT_ALIGNMENT);
1076         default:
1077                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1078                 return -EFAULT;
1079         };
1080         return -EFAULT;
1081 }
1082
1083 /* returns digest size in bytes  per hash algo */
1084 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1085 {
1086         switch (qat_hash_alg) {
1087         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1088                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1089         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1090                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1091         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1092                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1093         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1094                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1095         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1096                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1097         case ICP_QAT_HW_AUTH_ALGO_MD5:
1098                 return ICP_QAT_HW_MD5_STATE1_SZ;
1099         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1100                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1101         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1102                 /* return maximum digest size in this case */
1103                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1104         default:
1105                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1106                 return -EFAULT;
1107         };
1108         return -EFAULT;
1109 }
1110
/* returns block size in bytes per hash algo */
1112 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1113 {
1114         switch (qat_hash_alg) {
1115         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1116                 return SHA_CBLOCK;
1117         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1118                 return SHA256_CBLOCK;
1119         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1120                 return SHA256_CBLOCK;
1121         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1122                 return SHA512_CBLOCK;
1123         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1124                 return SHA512_CBLOCK;
1125         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1126                 return 16;
1127         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1128                 return ICP_QAT_HW_AES_BLK_SZ;
1129         case ICP_QAT_HW_AUTH_ALGO_MD5:
1130                 return MD5_CBLOCK;
1131         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1132                 /* return maximum block size in this case */
1133                 return SHA512_CBLOCK;
1134         default:
1135                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1136                 return -EFAULT;
1137         };
1138         return -EFAULT;
1139 }
1140
1141 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1142 {
1143         SHA_CTX ctx;
1144
1145         if (!SHA1_Init(&ctx))
1146                 return -EFAULT;
1147         SHA1_Transform(&ctx, data_in);
1148         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1149         return 0;
1150 }
1151
1152 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1153 {
1154         SHA256_CTX ctx;
1155
1156         if (!SHA224_Init(&ctx))
1157                 return -EFAULT;
1158         SHA256_Transform(&ctx, data_in);
1159         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1160         return 0;
1161 }
1162
1163 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1164 {
1165         SHA256_CTX ctx;
1166
1167         if (!SHA256_Init(&ctx))
1168                 return -EFAULT;
1169         SHA256_Transform(&ctx, data_in);
1170         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1171         return 0;
1172 }
1173
1174 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1175 {
1176         SHA512_CTX ctx;
1177
1178         if (!SHA384_Init(&ctx))
1179                 return -EFAULT;
1180         SHA512_Transform(&ctx, data_in);
1181         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1182         return 0;
1183 }
1184
1185 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1186 {
1187         SHA512_CTX ctx;
1188
1189         if (!SHA512_Init(&ctx))
1190                 return -EFAULT;
1191         SHA512_Transform(&ctx, data_in);
1192         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1193         return 0;
1194 }
1195
1196 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1197 {
1198         MD5_CTX ctx;
1199
1200         if (!MD5_Init(&ctx))
1201                 return -EFAULT;
1202         MD5_Transform(&ctx, data_in);
1203         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1204
1205         return 0;
1206 }
1207
1208 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1209                         uint8_t *data_in,
1210                         uint8_t *data_out)
1211 {
1212         int digest_size;
1213         uint8_t digest[qat_hash_get_digest_size(
1214                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1215         uint32_t *hash_state_out_be32;
1216         uint64_t *hash_state_out_be64;
1217         int i;
1218
1219         /* Initialize to avoid gcc warning */
1220         memset(digest, 0, sizeof(digest));
1221
1222         digest_size = qat_hash_get_digest_size(hash_alg);
1223         if (digest_size <= 0)
1224                 return -EFAULT;
1225
1226         hash_state_out_be32 = (uint32_t *)data_out;
1227         hash_state_out_be64 = (uint64_t *)data_out;
1228
1229         switch (hash_alg) {
1230         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1231                 if (partial_hash_sha1(data_in, digest))
1232                         return -EFAULT;
1233                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1234                         *hash_state_out_be32 =
1235                                 rte_bswap32(*(((uint32_t *)digest)+i));
1236                 break;
1237         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1238                 if (partial_hash_sha224(data_in, digest))
1239                         return -EFAULT;
1240                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1241                         *hash_state_out_be32 =
1242                                 rte_bswap32(*(((uint32_t *)digest)+i));
1243                 break;
1244         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1245                 if (partial_hash_sha256(data_in, digest))
1246                         return -EFAULT;
1247                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1248                         *hash_state_out_be32 =
1249                                 rte_bswap32(*(((uint32_t *)digest)+i));
1250                 break;
1251         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1252                 if (partial_hash_sha384(data_in, digest))
1253                         return -EFAULT;
1254                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1255                         *hash_state_out_be64 =
1256                                 rte_bswap64(*(((uint64_t *)digest)+i));
1257                 break;
1258         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1259                 if (partial_hash_sha512(data_in, digest))
1260                         return -EFAULT;
1261                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1262                         *hash_state_out_be64 =
1263                                 rte_bswap64(*(((uint64_t *)digest)+i));
1264                 break;
1265         case ICP_QAT_HW_AUTH_ALGO_MD5:
1266                 if (partial_hash_md5(data_in, data_out))
1267                         return -EFAULT;
1268                 break;
1269         default:
1270                 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1271                 return -EFAULT;
1272         }
1273
1274         return 0;
1275 }
1276 #define HMAC_IPAD_VALUE 0x36
1277 #define HMAC_OPAD_VALUE 0x5c
1278 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1279
1280 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1281
1282 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1283 {
1284         int i;
1285
1286         derived[0] = base[0] << 1;
1287         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1288                 derived[i] = base[i] << 1;
1289                 derived[i - 1] |= base[i] >> 7;
1290         }
1291
1292         if (base[0] & 0x80)
1293                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1294 }
1295
1296 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1297                                 const uint8_t *auth_key,
1298                                 uint16_t auth_keylen,
1299                                 uint8_t *p_state_buf,
1300                                 uint16_t *p_state_len,
1301                                 uint8_t aes_cmac)
1302 {
1303         int block_size;
1304         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1305         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1306         int i;
1307
1308         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1309
1310                 /* CMAC */
1311                 if (aes_cmac) {
1312                         AES_KEY enc_key;
1313                         uint8_t *in = NULL;
1314                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1315                         uint8_t *k1, *k2;
1316
1317                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1318
1319                         in = rte_zmalloc("AES CMAC K1",
1320                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1321
1322                         if (in == NULL) {
1323                                 QAT_LOG(ERR, "Failed to alloc memory");
1324                                 return -ENOMEM;
1325                         }
1326
1327                         rte_memcpy(in, AES_CMAC_SEED,
1328                                    ICP_QAT_HW_AES_128_KEY_SZ);
1329                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1330
1331                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1332                                 &enc_key) != 0) {
1333                                 rte_free(in);
1334                                 return -EFAULT;
1335                         }
1336
1337                         AES_encrypt(in, k0, &enc_key);
1338
1339                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1340                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1341
1342                         aes_cmac_key_derive(k0, k1);
1343                         aes_cmac_key_derive(k1, k2);
1344
1345                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1346                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1347                         rte_free(in);
1348                         return 0;
1349                 } else {
1350                         static uint8_t qat_aes_xcbc_key_seed[
1351                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1352                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1353                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1354                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1355                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1356                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1357                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1358                         };
1359
1360                         uint8_t *in = NULL;
1361                         uint8_t *out = p_state_buf;
1362                         int x;
1363                         AES_KEY enc_key;
1364
1365                         in = rte_zmalloc("working mem for key",
1366                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1367                         if (in == NULL) {
1368                                 QAT_LOG(ERR, "Failed to alloc memory");
1369                                 return -ENOMEM;
1370                         }
1371
1372                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1373                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1374                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1375                                 if (AES_set_encrypt_key(auth_key,
1376                                                         auth_keylen << 3,
1377                                                         &enc_key) != 0) {
1378                                         rte_free(in -
1379                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1380                                         memset(out -
1381                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1382                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1383                                         return -EFAULT;
1384                                 }
1385                                 AES_encrypt(in, out, &enc_key);
1386                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1387                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1388                         }
1389                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1390                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1391                         return 0;
1392                 }
1393
1394         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1395                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1396                 uint8_t *in = NULL;
1397                 uint8_t *out = p_state_buf;
1398                 AES_KEY enc_key;
1399
1400                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1401                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1402                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1403                 in = rte_zmalloc("working mem for key",
1404                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1405                 if (in == NULL) {
1406                         QAT_LOG(ERR, "Failed to alloc memory");
1407                         return -ENOMEM;
1408                 }
1409
1410                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1411                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1412                         &enc_key) != 0) {
1413                         return -EFAULT;
1414                 }
1415                 AES_encrypt(in, out, &enc_key);
1416                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1417                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1418                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1419                 rte_free(in);
1420                 return 0;
1421         }
1422
1423         block_size = qat_hash_get_block_size(hash_alg);
1424         if (block_size < 0)
1425                 return block_size;
1426         /* init ipad and opad from key and xor with fixed values */
1427         memset(ipad, 0, block_size);
1428         memset(opad, 0, block_size);
1429
1430         if (auth_keylen > (unsigned int)block_size) {
1431                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1432                 return -EFAULT;
1433         }
1434         rte_memcpy(ipad, auth_key, auth_keylen);
1435         rte_memcpy(opad, auth_key, auth_keylen);
1436
1437         for (i = 0; i < block_size; i++) {
1438                 uint8_t *ipad_ptr = ipad + i;
1439                 uint8_t *opad_ptr = opad + i;
1440                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1441                 *opad_ptr ^= HMAC_OPAD_VALUE;
1442         }
1443
1444         /* do partial hash of ipad and copy to state1 */
1445         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1446                 memset(ipad, 0, block_size);
1447                 memset(opad, 0, block_size);
1448                 QAT_LOG(ERR, "ipad precompute failed");
1449                 return -EFAULT;
1450         }
1451
1452         /*
1453          * State len is a multiple of 8, so may be larger than the digest.
1454          * Put the partial hash of opad state_len bytes after state1
1455          */
1456         *p_state_len = qat_hash_get_state1_size(hash_alg);
1457         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1458                 memset(ipad, 0, block_size);
1459                 memset(opad, 0, block_size);
1460                 QAT_LOG(ERR, "opad precompute failed");
1461                 return -EFAULT;
1462         }
1463
1464         /*  don't leave data lying around */
1465         memset(ipad, 0, block_size);
1466         memset(opad, 0, block_size);
1467         return 0;
1468 }
1469
/*
 * Populate the common LA (lookaside) request header template in
 * session->fw_req from fields already decided on the session
 * (command id, protocol flag, slice types, auth op/direction, IV size).
 * Called once at session setup; the template is reused per request.
 */
static void
qat_sym_session_init_common_hdr(struct qat_sym_session *session)
{
	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
	uint32_t slice_flags = session->slice_types;

	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->service_cmd_id = session->qat_cmd;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	/* No partial-packet processing; IV is carried as 16B in-request data */
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

	/* Exactly one protocol flag is encoded into serv_specif_flags.
	 * ZUC uses a dedicated flag/macro distinct from the generic
	 * PROTO field used by CCM/GCM/SNOW3G.
	 */
	switch (proto_flags)		{
	case QAT_CRYPTO_PROTO_FLAG_NONE:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_CCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_GCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_GCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_SNOW_3G_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_ZUC:
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
			ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	}

	/* More than one of the following flags can be set at once */
	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
		/* Single-pass crypto (SPC) slice in use */
		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	}
	if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
		/* Unified crypto slice (UCS, gen-4 style key layout) in use */
		ICP_QAT_FW_LA_SLICE_TYPE_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
	}

	if (session->is_auth) {
		if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
			/* VERIFY: firmware compares the digest, does not
			 * return it */
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
		} else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
			/* GENERATE: firmware returns the digest, no compare */
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
						ICP_QAT_FW_LA_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
						ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		}
	} else {
		/* No auth on this session: neither return nor compare */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	}

	if (session->is_iv12B) {
		/* 12-byte GCM IV fast path flag */
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
	}

	/* Stateless operation: no partial-state carry-over, digest is
	 * placed separately rather than appended in the data buffer.
	 */
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}
1555
/*
 * Build the cipher portion of the session's hardware content descriptor
 * (CD) and the corresponding fields of the firmware request template.
 *
 * @cdesc        session being configured; cd/cd_cur_ptr, fw_req and the
 *               qat_* algorithm/mode/direction fields must already be set
 * @cipherkey    raw cipher key material
 * @cipherkeylen length of @cipherkey in bytes
 *
 * Returns 0 on success, -EFAULT if cdesc->qat_cmd is not a cipher command.
 *
 * NOTE(review): cipher_cd_ctrl and hash_cd_ctrl deliberately alias the
 * same cd_ctrl memory; which fields are valid depends on qat_cmd.
 * Likewise req_ucs/req_cipher alias serv_specif_rqpars. The descriptor
 * is laid out by advancing cdesc->cd_cur_ptr, so statement order here
 * defines the on-wire layout.
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	/* Configure slice chaining according to the command type and
	 * reset cd_cur_ptr to the start of the CD for cipher-first
	 * commands. For HASH_CIPHER the auth setup already positioned
	 * cd_cur_ptr, so nothing is done here.
	 */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		/* Cipher-only: make sure no auth result is returned/compared */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	/* Decide whether the hardware must convert (expand) the key.
	 * Encrypt paths generally use the key as-is; decrypt paths need
	 * conversion except for streaming/AEAD modes.
	 */
	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set.
		 * Chacha20-Poly1305 is special case, CTR but single-pass
		 * so both direction need to be used.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
				cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		}
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizing; algorithms that carry their IV
	 * in the CD (SNOW3G/ZUC) include the IV size in total_key_size.
	 * State sizes are expressed in 8-byte (quad-word) units, hence >> 3.
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		/* AES and other block ciphers: key used as supplied */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	}
	/* Offset of the cipher config within the CD, in quad-words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	/* cipher and cipher20 view the same CD location; cipher20 is the
	 * gen-4 (UCS) layout with an extended config word.
	 */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	/* Copy the key into the CD and advance cd_cur_ptr past it */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI stores the key twice: plain, then XORed with the
		 * F8 modifier; temp_key points at the second copy.
		 */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		const uint8_t *final_key = cipherkey;

		/* UCS slice: key field is padded up to a 16B multiple and
		 * the reserved config words must be zeroed.
		 */
		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (cdesc->is_single_pass) {
		/* Encode the digest length into the cipher config word */
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
					cdesc->aad_len);
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
	}

	/* Pad the key field in the CD out to total_key_size. 3DES short-key
	 * options replicate K1 (and K2) instead of zero-padding.
	 */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	/* Fill single-pass request parameters (UCS layout vs legacy) */
	if (cdesc->is_ucs) {
		/*
		 * These values match in terms of position auth
		 * slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
		}
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;
	}
	/* Final CD size and key size, rounded to quad-words for firmware */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1756
1757 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1758                                                 const uint8_t *authkey,
1759                                                 uint32_t authkeylen,
1760                                                 uint32_t aad_length,
1761                                                 uint32_t digestsize,
1762                                                 unsigned int operation)
1763 {
1764         struct icp_qat_hw_auth_setup *hash;
1765         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1766         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1767         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1768         void *ptr = &req_tmpl->cd_ctrl;
1769         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1770         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1771         struct icp_qat_fw_la_auth_req_params *auth_param =
1772                 (struct icp_qat_fw_la_auth_req_params *)
1773                 ((char *)&req_tmpl->serv_specif_rqpars +
1774                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1775         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1776         uint16_t hash_offset, cd_size;
1777         uint32_t *aad_len = NULL;
1778         uint32_t wordIndex  = 0;
1779         uint32_t *pTempKey;
1780
1781         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1782                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1783                                         ICP_QAT_FW_SLICE_AUTH);
1784                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1785                                         ICP_QAT_FW_SLICE_DRAM_WR);
1786                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1787         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1788                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1789                                 ICP_QAT_FW_SLICE_AUTH);
1790                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1791                                 ICP_QAT_FW_SLICE_CIPHER);
1792                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1793                                 ICP_QAT_FW_SLICE_CIPHER);
1794                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1795                                 ICP_QAT_FW_SLICE_DRAM_WR);
1796                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1797         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1798                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1799                 return -EFAULT;
1800         }
1801
1802         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1803                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1804         else
1805                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1806
1807         /*
1808          * Setup the inner hash config
1809          */
1810         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1811         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1812         hash->auth_config.reserved = 0;
1813         hash->auth_config.config =
1814                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1815                                 cdesc->qat_hash_alg, digestsize);
1816
1817         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1818                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1819                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1820                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1821                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1822                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1823                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1824                 || cdesc->is_cnt_zero
1825                         )
1826                 hash->auth_counter.counter = 0;
1827         else {
1828                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1829
1830                 if (block_size < 0)
1831                         return block_size;
1832                 hash->auth_counter.counter = rte_bswap32(block_size);
1833         }
1834
1835         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1836
1837         /*
1838          * cd_cur_ptr now points at the state1 information.
1839          */
1840         switch (cdesc->qat_hash_alg) {
1841         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1842                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1843                         /* Plain SHA-1 */
1844                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1845                                         sizeof(sha1InitialState));
1846                         state1_size = qat_hash_get_state1_size(
1847                                         cdesc->qat_hash_alg);
1848                         break;
1849                 }
1850                 /* SHA-1 HMAC */
1851                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1852                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1853                         cdesc->aes_cmac)) {
1854                         QAT_LOG(ERR, "(SHA)precompute failed");
1855                         return -EFAULT;
1856                 }
1857                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1858                 break;
1859         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1860                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1861                         /* Plain SHA-224 */
1862                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1863                                         sizeof(sha224InitialState));
1864                         state1_size = qat_hash_get_state1_size(
1865                                         cdesc->qat_hash_alg);
1866                         break;
1867                 }
1868                 /* SHA-224 HMAC */
1869                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1870                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1871                         cdesc->aes_cmac)) {
1872                         QAT_LOG(ERR, "(SHA)precompute failed");
1873                         return -EFAULT;
1874                 }
1875                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1876                 break;
1877         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1878                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1879                         /* Plain SHA-256 */
1880                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1881                                         sizeof(sha256InitialState));
1882                         state1_size = qat_hash_get_state1_size(
1883                                         cdesc->qat_hash_alg);
1884                         break;
1885                 }
1886                 /* SHA-256 HMAC */
1887                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1888                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1889                         cdesc->aes_cmac)) {
1890                         QAT_LOG(ERR, "(SHA)precompute failed");
1891                         return -EFAULT;
1892                 }
1893                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1894                 break;
1895         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1896                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1897                         /* Plain SHA-384 */
1898                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1899                                         sizeof(sha384InitialState));
1900                         state1_size = qat_hash_get_state1_size(
1901                                         cdesc->qat_hash_alg);
1902                         break;
1903                 }
1904                 /* SHA-384 HMAC */
1905                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1906                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1907                         cdesc->aes_cmac)) {
1908                         QAT_LOG(ERR, "(SHA)precompute failed");
1909                         return -EFAULT;
1910                 }
1911                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1912                 break;
1913         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1914                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1915                         /* Plain SHA-512 */
1916                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1917                                         sizeof(sha512InitialState));
1918                         state1_size = qat_hash_get_state1_size(
1919                                         cdesc->qat_hash_alg);
1920                         break;
1921                 }
1922                 /* SHA-512 HMAC */
1923                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1924                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1925                         cdesc->aes_cmac)) {
1926                         QAT_LOG(ERR, "(SHA)precompute failed");
1927                         return -EFAULT;
1928                 }
1929                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1930                 break;
1931         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1932                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1933
1934                 if (cdesc->aes_cmac)
1935                         memset(cdesc->cd_cur_ptr, 0, state1_size);
1936                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1937                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1938                         &state2_size, cdesc->aes_cmac)) {
1939                         cdesc->aes_cmac ? QAT_LOG(ERR,
1940                                                   "(CMAC)precompute failed")
1941                                         : QAT_LOG(ERR,
1942                                                   "(XCBC)precompute failed");
1943                         return -EFAULT;
1944                 }
1945                 break;
1946         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1947         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1948                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1949                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1950                 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1951                         authkeylen, cdesc->cd_cur_ptr + state1_size,
1952                         &state2_size, cdesc->aes_cmac)) {
1953                         QAT_LOG(ERR, "(GCM)precompute failed");
1954                         return -EFAULT;
1955                 }
1956                 /*
1957                  * Write (the length of AAD) into bytes 16-19 of state2
1958                  * in big-endian format. This field is 8 bytes
1959                  */
1960                 auth_param->u2.aad_sz =
1961                                 RTE_ALIGN_CEIL(aad_length, 16);
1962                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1963
1964                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1965                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1966                                         ICP_QAT_HW_GALOIS_H_SZ);
1967                 *aad_len = rte_bswap32(aad_length);
1968                 cdesc->aad_len = aad_length;
1969                 break;
1970         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1971                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1972                 state1_size = qat_hash_get_state1_size(
1973                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1974                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1975                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1976
1977                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1978                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
1979                 cipherconfig->cipher_config.val =
1980                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1981                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1982                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
1983                         ICP_QAT_HW_CIPHER_ENCRYPT);
1984                 memcpy(cipherconfig->key, authkey, authkeylen);
1985                 memset(cipherconfig->key + authkeylen,
1986                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1987                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1988                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1989                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1990                 break;
1991         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1992                 hash->auth_config.config =
1993                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1994                                 cdesc->qat_hash_alg, digestsize);
1995                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1996                 state1_size = qat_hash_get_state1_size(
1997                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1998                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1999                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
2000                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
2001
2002                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2003                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
2004                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
2005                 cdesc->min_qat_dev_gen = QAT_GEN2;
2006
2007                 break;
2008         case ICP_QAT_HW_AUTH_ALGO_MD5:
2009                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
2010                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
2011                         cdesc->aes_cmac)) {
2012                         QAT_LOG(ERR, "(MD5)precompute failed");
2013                         return -EFAULT;
2014                 }
2015                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
2016                 break;
2017         case ICP_QAT_HW_AUTH_ALGO_NULL:
2018                 state1_size = qat_hash_get_state1_size(
2019                                 ICP_QAT_HW_AUTH_ALGO_NULL);
2020                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
2021                 break;
2022         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
2023                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
2024                 state1_size = qat_hash_get_state1_size(
2025                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
2026                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
2027                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
2028
2029                 if (aad_length > 0) {
2030                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2031                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
2032                         auth_param->u2.aad_sz =
2033                         RTE_ALIGN_CEIL(aad_length,
2034                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2035                 } else {
2036                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2037                 }
2038                 cdesc->aad_len = aad_length;
2039                 hash->auth_counter.counter = 0;
2040
2041                 hash_cd_ctrl->outer_prefix_sz = digestsize;
2042                 auth_param->hash_state_sz = digestsize;
2043
2044                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2045                 break;
2046         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2047                 state1_size = qat_hash_get_state1_size(
2048                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2049                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2050                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2051                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2052                                                         + authkeylen);
2053                 /*
2054                 * The Inner Hash Initial State2 block must contain IK
2055                 * (Initialisation Key), followed by IK XOR-ed with KM
2056                 * (Key Modifier): IK||(IK^KM).
2057                 */
2058                 /* write the auth key */
2059                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2060                 /* initialise temp key with auth key */
2061                 memcpy(pTempKey, authkey, authkeylen);
2062                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2063                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2064                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2065                 break;
2066         default:
2067                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2068                 return -EFAULT;
2069         }
2070
2071         /* Auth CD config setup */
2072         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2073         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2074         hash_cd_ctrl->inner_res_sz = digestsize;
2075         hash_cd_ctrl->final_sz = digestsize;
2076         hash_cd_ctrl->inner_state1_sz = state1_size;
2077         auth_param->auth_res_sz = digestsize;
2078
2079         hash_cd_ctrl->inner_state2_sz  = state2_size;
2080         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2081                         ((sizeof(struct icp_qat_hw_auth_setup) +
2082                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2083                                         >> 3);
2084
2085         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2086         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2087
2088         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2089         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2090
2091         return 0;
2092 }
2093
2094 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2095 {
2096         switch (key_len) {
2097         case ICP_QAT_HW_AES_128_KEY_SZ:
2098                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2099                 break;
2100         case ICP_QAT_HW_AES_192_KEY_SZ:
2101                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2102                 break;
2103         case ICP_QAT_HW_AES_256_KEY_SZ:
2104                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2105                 break;
2106         default:
2107                 return -EINVAL;
2108         }
2109         return 0;
2110 }
2111
2112 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2113                 enum icp_qat_hw_cipher_algo *alg)
2114 {
2115         switch (key_len) {
2116         case ICP_QAT_HW_AES_128_KEY_SZ:
2117                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2118                 break;
2119         case ICP_QAT_HW_AES_256_KEY_SZ:
2120                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2121                 break;
2122         default:
2123                 return -EINVAL;
2124         }
2125         return 0;
2126 }
2127
2128 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2129 {
2130         switch (key_len) {
2131         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2132                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2133                 break;
2134         default:
2135                 return -EINVAL;
2136         }
2137         return 0;
2138 }
2139
2140 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2141 {
2142         switch (key_len) {
2143         case ICP_QAT_HW_KASUMI_KEY_SZ:
2144                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2145                 break;
2146         default:
2147                 return -EINVAL;
2148         }
2149         return 0;
2150 }
2151
2152 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2153 {
2154         switch (key_len) {
2155         case ICP_QAT_HW_DES_KEY_SZ:
2156                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2157                 break;
2158         default:
2159                 return -EINVAL;
2160         }
2161         return 0;
2162 }
2163
2164 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2165 {
2166         switch (key_len) {
2167         case QAT_3DES_KEY_SZ_OPT1:
2168         case QAT_3DES_KEY_SZ_OPT2:
2169         case QAT_3DES_KEY_SZ_OPT3:
2170                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2171                 break;
2172         default:
2173                 return -EINVAL;
2174         }
2175         return 0;
2176 }
2177
2178 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2179 {
2180         switch (key_len) {
2181         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2182                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2183                 break;
2184         default:
2185                 return -EINVAL;
2186         }
2187         return 0;
2188 }
2189
2190 #ifdef RTE_LIB_SECURITY
2191 static int
2192 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2193 {
2194         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2195         struct rte_security_docsis_xform *docsis = &conf->docsis;
2196
2197         /* CRC generate -> Cipher encrypt */
2198         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2199
2200                 if (crypto_sym != NULL &&
2201                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2202                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2203                     crypto_sym->cipher.algo ==
2204                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2205                     (crypto_sym->cipher.key.length ==
2206                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2207                      crypto_sym->cipher.key.length ==
2208                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2209                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2210                     crypto_sym->next == NULL) {
2211                         return 0;
2212                 }
2213         /* Cipher decrypt -> CRC verify */
2214         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2215
2216                 if (crypto_sym != NULL &&
2217                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2218                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2219                     crypto_sym->cipher.algo ==
2220                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2221                     (crypto_sym->cipher.key.length ==
2222                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2223                      crypto_sym->cipher.key.length ==
2224                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2225                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2226                     crypto_sym->next == NULL) {
2227                         return 0;
2228                 }
2229         }
2230
2231         return -EINVAL;
2232 }
2233
2234 static int
2235 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2236                 struct rte_security_session_conf *conf, void *session_private)
2237 {
2238         int ret;
2239         int qat_cmd_id;
2240         struct rte_crypto_sym_xform *xform = NULL;
2241         struct qat_sym_session *session = session_private;
2242
2243         /* Clear the session */
2244         memset(session, 0, qat_sym_session_get_private_size(dev));
2245
2246         ret = qat_sec_session_check_docsis(conf);
2247         if (ret) {
2248                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2249                 return ret;
2250         }
2251
2252         xform = conf->crypto_xform;
2253
2254         /* Verify the session physical address is known */
2255         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2256         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2257                 QAT_LOG(ERR,
2258                         "Session physical address unknown. Bad memory pool.");
2259                 return -EINVAL;
2260         }
2261
2262         /* Set context descriptor physical address */
2263         session->cd_paddr = session_paddr +
2264                         offsetof(struct qat_sym_session, cd);
2265
2266         session->min_qat_dev_gen = QAT_GEN1;
2267
2268         /* Get requested QAT command id - should be cipher */
2269         qat_cmd_id = qat_get_cmd_id(xform);
2270         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2271                 QAT_LOG(ERR, "Unsupported xform chain requested");
2272                 return -ENOTSUP;
2273         }
2274         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2275
2276         ret = qat_sym_session_configure_cipher(dev, xform, session);
2277         if (ret < 0)
2278                 return ret;
2279         qat_sym_session_finalize(session);
2280
2281         return 0;
2282 }
2283
2284 int
2285 qat_security_session_create(void *dev,
2286                                 struct rte_security_session_conf *conf,
2287                                 struct rte_security_session *sess,
2288                                 struct rte_mempool *mempool)
2289 {
2290         void *sess_private_data;
2291         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2292         int ret;
2293
2294         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2295                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2296                 QAT_LOG(ERR, "Invalid security protocol");
2297                 return -EINVAL;
2298         }
2299
2300         if (rte_mempool_get(mempool, &sess_private_data)) {
2301                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2302                 return -ENOMEM;
2303         }
2304
2305         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2306                         sess_private_data);
2307         if (ret != 0) {
2308                 QAT_LOG(ERR, "Failed to configure session parameters");
2309                 /* Return session to mempool */
2310                 rte_mempool_put(mempool, sess_private_data);
2311                 return ret;
2312         }
2313
2314         set_sec_session_private_data(sess, sess_private_data);
2315
2316         return ret;
2317 }
2318
2319 int
2320 qat_security_session_destroy(void *dev __rte_unused,
2321                                  struct rte_security_session *sess)
2322 {
2323         void *sess_priv = get_sec_session_private_data(sess);
2324         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2325
2326         if (sess_priv) {
2327                 if (s->bpi_ctx)
2328                         bpi_cipher_ctx_free(s->bpi_ctx);
2329                 memset(s, 0, qat_sym_session_get_private_size(dev));
2330                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2331
2332                 set_sec_session_private_data(sess, NULL);
2333                 rte_mempool_put(sess_mp, sess_priv);
2334         }
2335         return 0;
2336 }
2337 #endif