crypto/qat: add AES-GCM in UCS-SPC mode
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2019 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
24
/* The arrays below are the standard initial hash values (H0..Hn) for each
 * SHA variant, serialized big-endian, exactly as specified in FIPS 180-2.
 * They seed the hash state written into the QAT content descriptor.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
60 static int
61 qat_sym_cd_cipher_set(struct qat_sym_session *cd,
62                                                 const uint8_t *enckey,
63                                                 uint32_t enckeylen);
64
65 static int
66 qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
67                                                 const uint8_t *authkey,
68                                                 uint32_t authkeylen,
69                                                 uint32_t aad_length,
70                                                 uint32_t digestsize,
71                                                 unsigned int operation);
72 static void
73 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
74
75 /* Req/cd init functions */
76
/** Final session-setup step: populate the common firmware request header
 *  once all cipher/auth parameters have been written into the session.
 */
static void
qat_sym_session_finalize(struct qat_sym_session *session)
{
	qat_sym_session_init_common_hdr(session);
}
82
83 /** Frees a context previously created
84  *  Depends on openssl libcrypto
85  */
86 static void
87 bpi_cipher_ctx_free(void *bpi_ctx)
88 {
89         if (bpi_ctx != NULL)
90                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
91 }
92
93 /** Creates a context in either AES or DES in ECB mode
94  *  Depends on openssl libcrypto
95  */
96 static int
97 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
98                 enum rte_crypto_cipher_operation direction __rte_unused,
99                 const uint8_t *key, uint16_t key_length, void **ctx)
100 {
101         const EVP_CIPHER *algo = NULL;
102         int ret;
103         *ctx = EVP_CIPHER_CTX_new();
104
105         if (*ctx == NULL) {
106                 ret = -ENOMEM;
107                 goto ctx_init_err;
108         }
109
110         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
111                 algo = EVP_des_ecb();
112         else
113                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
114                         algo = EVP_aes_128_ecb();
115                 else
116                         algo = EVP_aes_256_ecb();
117
118         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
119         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
120                 ret = -EINVAL;
121                 goto ctx_init_err;
122         }
123
124         return 0;
125
126 ctx_init_err:
127         if (*ctx != NULL)
128                 EVP_CIPHER_CTX_free(*ctx);
129         return ret;
130 }
131
132 static int
133 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
134                 struct qat_sym_dev_private *internals)
135 {
136         int i = 0;
137         const struct rte_cryptodev_capabilities *capability;
138
139         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
140                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
141                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
142                         continue;
143
144                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
145                         continue;
146
147                 if (capability->sym.cipher.algo == algo)
148                         return 1;
149         }
150         return 0;
151 }
152
153 static int
154 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
155                 struct qat_sym_dev_private *internals)
156 {
157         int i = 0;
158         const struct rte_cryptodev_capabilities *capability;
159
160         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
161                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
162                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
163                         continue;
164
165                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
166                         continue;
167
168                 if (capability->sym.auth.algo == algo)
169                         return 1;
170         }
171         return 0;
172 }
173
174 void
175 qat_sym_session_clear(struct rte_cryptodev *dev,
176                 struct rte_cryptodev_sym_session *sess)
177 {
178         uint8_t index = dev->driver_id;
179         void *sess_priv = get_sym_session_private_data(sess, index);
180         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
181
182         if (sess_priv) {
183                 if (s->bpi_ctx)
184                         bpi_cipher_ctx_free(s->bpi_ctx);
185                 memset(s, 0, qat_sym_session_get_private_size(dev));
186                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
187
188                 set_sym_session_private_data(sess, index, NULL);
189                 rte_mempool_put(sess_mp, sess_priv);
190         }
191 }
192
193 static int
194 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
195 {
196         /* Cipher Only */
197         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
198                 return ICP_QAT_FW_LA_CMD_CIPHER;
199
200         /* Authentication Only */
201         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
202                 return ICP_QAT_FW_LA_CMD_AUTH;
203
204         /* AEAD */
205         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
206                 /* AES-GCM and AES-CCM works with different direction
207                  * GCM first encrypts and generate hash where AES-CCM
208                  * first generate hash and encrypts. Similar relation
209                  * applies to decryption.
210                  */
211                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
212                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
213                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
214                         else
215                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
216                 else
217                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
218                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
219                         else
220                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
221         }
222
223         if (xform->next == NULL)
224                 return -1;
225
226         /* Cipher then Authenticate */
227         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
228                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
229                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
230
231         /* Authenticate then Cipher */
232         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
233                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
235
236         return -1;
237 }
238
239 static struct rte_crypto_auth_xform *
240 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
241 {
242         do {
243                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
244                         return &xform->auth;
245
246                 xform = xform->next;
247         } while (xform);
248
249         return NULL;
250 }
251
252 static struct rte_crypto_cipher_xform *
253 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
254 {
255         do {
256                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
257                         return &xform->cipher;
258
259                 xform = xform->next;
260         } while (xform);
261
262         return NULL;
263 }
264
/** Configure the cipher half of a QAT session from a cipher xform.
 *
 *  Validates the key size for the requested algorithm, selects the QAT
 *  hardware cipher algorithm/mode, records the IV layout, sets the
 *  direction, and builds the cipher content descriptor.
 *
 *  @param dev      cryptodev owning the session
 *  @param xform    xform chain containing the cipher transform
 *  @param session  session to populate
 *  @return 0 on success; -EINVAL / -ENOTSUP on failure (any BPI OpenSSL
 *          context created along the way is freed before returning)
 */
int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
{
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	enum qat_device_gen qat_dev_gen =
				internals->qat_dev->qat_dev_gen;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		/* GEN4 devices use the UCS (Unified Crypto Slice) slice
		 * for AES-CTR.
		 */
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		/* DOCSIS BPI needs a software (OpenSSL) ECB context for
		 * runt-block processing in addition to the HW CBC setup.
		 */
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		/* As above, but AES-ECB for the runt blocks. */
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		/* ZUC is optional per device generation, so consult the
		 * capability table first.
		 */
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		/* XTS key is two concatenated AES keys; validate half. */
		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
			QAT_LOG(ERR, "AES-XTS-192 not supported");
			ret = -EINVAL;
			goto error_out;
		}
		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	/* Build the cipher content descriptor from the key. */
	if (qat_sym_cd_cipher_set(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}
457
458 int
459 qat_sym_session_configure(struct rte_cryptodev *dev,
460                 struct rte_crypto_sym_xform *xform,
461                 struct rte_cryptodev_sym_session *sess,
462                 struct rte_mempool *mempool)
463 {
464         void *sess_private_data;
465         int ret;
466
467         if (rte_mempool_get(mempool, &sess_private_data)) {
468                 CDEV_LOG_ERR(
469                         "Couldn't get object from session mempool");
470                 return -ENOMEM;
471         }
472
473         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
474         if (ret != 0) {
475                 QAT_LOG(ERR,
476                     "Crypto QAT PMD: failed to configure session parameters");
477
478                 /* Return session to mempool */
479                 rte_mempool_put(mempool, sess_private_data);
480                 return ret;
481         }
482
483         set_sym_session_private_data(sess, dev->driver_id,
484                 sess_private_data);
485
486         return 0;
487 }
488
/** Enable the extended-protocol hash flags needed for wireless "mixed"
 *  algorithm combinations.
 *
 *  Sets the Use-Extended-Protocol-Flags bit in the common header (LW 1),
 *  ORs @hash_flag into the cd_ctrl hash flags (LW 28), and programs the
 *  SNOW3G/ZUC proto flags according to the session's cipher algorithm.
 *
 *  @param session    session whose firmware request template is updated
 *  @param hash_flag  hash flag bits to OR into the cd_ctrl header
 */
static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		/* SNOW3G cipher: legacy proto field carries SNOW3G,
		 * ZUC proto flag cleared.
		 */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		/* ZUC cipher: legacy proto cleared, dedicated ZUC flag set. */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		/* Any other cipher: clear both proto indications. */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}
530
/** Special handling for "mixed" wireless cipher+hash combinations
 *  (e.g. ZUC hash with a non-ZUC cipher, SNOW3G hash with a non-SNOW3G
 *  cipher, or AES-CMAC/NULL hash with a SNOW3G/ZUC cipher).
 *
 *  Such combinations need the extended hash flags and a minimum device
 *  generation: GEN2 when the device advertises the mixed-crypto
 *  capability, otherwise GEN3.
 */
static void
qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
		struct qat_sym_session *session)
{
	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;

	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		/* ZUC hash mixed with a non-ZUC cipher */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		/* SNOW3G hash mixed with a non-SNOW3G cipher */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
	} else if ((session->aes_cmac ||
			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
			(session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
		/* AES-CMAC or NULL hash mixed with a wireless cipher:
		 * extended flags needed, but no extra hash flag bits.
		 */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session, 0);
	}
}
561
/** Populate a session private object from an xform chain.
 *
 *  Resolves the firmware LA command for the chain, dispatches to the
 *  cipher/auth/AEAD configuration helpers, finalizes the common request
 *  header and applies mixed-algorithm fixups where needed.
 *
 *  @param dev              cryptodev owning the session
 *  @param xform            crypto transform chain
 *  @param session_private  session private area (must come from a mempool
 *                          with a resolvable IOVA)
 *  @return 0 on success, negative errno on failure
 */
int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_sym_session *session = session_private;
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
	int ret;
	int qat_cmd_id;
	int handle_mixed = 0;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR,
			"Session physical address unknown. Bad memory pool.");
		return -EINVAL;
	}

	memset(session, 0, sizeof(*session));
	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	session->min_qat_dev_gen = QAT_GEN1;
	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
	session->is_ucs = 0;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		if (ret < 0)
			return ret;
		/* GEN3 AES-GMAC with a GCM-sized IV can run as
		 * single-pass GMAC.
		 */
		session->is_single_pass_gmac =
			       qat_dev_gen == QAT_GEN3 &&
			       xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
			       xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			/* Chained cipher-then-auth: configure both halves
			 * and run the mixed-algorithm fixups afterwards.
			 */
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			handle_mixed = 1;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			/* Chained auth-then-cipher. */
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			handle_mixed = 1;
		}
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	default:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	}
	qat_sym_session_finalize(session);
	if (handle_mixed) {
		/* Special handling of mixed hash+cipher algorithms */
		qat_sym_session_handle_mixed(dev, session);
	}

	return 0;
}
673
/** Convert an AEAD session to single-pass (SPC) operation, where one
 *  CIPHER command performs both encryption and authentication.
 *
 *  Requires at least a GEN3 device. AES-GCM uses the dedicated AEAD
 *  cipher mode; other algorithms (Chacha20-Poly1305) use CTR mode.
 *
 *  @param session     session to switch to single-pass
 *  @param aead_xform  source AEAD transform (IV, AAD, digest, direction)
 *  @return 0 (always succeeds)
 */
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
		const struct rte_crypto_aead_xform *aead_xform)
{
	session->is_single_pass = 1;
	session->is_auth = 1;
	session->min_qat_dev_gen = QAT_GEN3;
	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	/* Chacha-Poly is a special case that uses QAT CTR mode */
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
	} else {
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	}
	session->cipher_iv.offset = aead_xform->iv.offset;
	session->cipher_iv.length = aead_xform->iv.length;
	session->aad_len = aead_xform->aad_length;
	session->digest_length = aead_xform->digest_length;

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	}

	return 0;
}
703
/*
 * Set up an auth-only (hash/MAC) session from an rte_crypto auth xform:
 * map the algorithm onto QAT hash-engine settings, then build the QAT
 * content descriptor(s).
 *
 * Returns 0 on success, -EINVAL on invalid key size or descriptor build
 * failure, -ENOTSUP when the algorithm is not offloadable (or not
 * available on this device).
 */
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	/* MODE1 (keyed) is the default; plain-hash cases override to MODE0 */
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;

	switch (auth_xform->algo) {
	/* Plain (unkeyed) SHA digests run the hash engine in MODE0 */
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	/* HMAC variants keep the default keyed MODE1 */
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		/*
		 * CMAC reuses the XCBC-MAC engine path; aes_cmac flags the
		 * precompute code to use CMAC subkey derivation instead.
		 */
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		/* GMAC is driven through the GCM (GALOIS/CTR) machinery */
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/*
		 * No IV length given: fall back to the full J0 block
		 * length; otherwise flag the 12-byte-IV fast path.
		 * NOTE(review): presumably AES_GCM_J0_LEN is the 16-byte
		 * pre-computed J0 case — confirm against qat_sym_session.h.
		 */
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
		else
			session->is_iv12B = 1;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		/* ZUC support is per-device; check the capability first */
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	/* Recognized by the API but not offloadable on QAT */
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		/* GMAC runs as a chained cipher+hash job in hardware */
		session->is_gmac = 1;
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * Descriptor build order matters: for generate
			 * (encrypt direction) the cipher desc content must
			 * be created first, then the authentication desc.
			 */
			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_cd_auth_set(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * For verify (decrypt direction) the authentication
			 * desc content must be created first, then cipher.
			 */

			if (qat_sym_cd_auth_set(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
	} else {
		/* All non-GMAC algorithms need only the auth descriptor */
		if (qat_sym_cd_auth_set(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}
864
/*
 * Set up an AEAD session (AES-GCM, AES-CCM, Chacha20-Poly1305) from an
 * rte_crypto AEAD xform: select the QAT cipher/hash algorithms, decide
 * between the single-pass (SPC) and the chained cipher+hash path, and
 * build the content descriptor(s).
 *
 * Returns 0 on success, -EINVAL on invalid key size, unknown algorithm
 * or descriptor build failure.
 */
int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_sym_dev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = aead_xform->digest_length;

	/* Default to the chained path; GCM/Chacha may switch to SPC below */
	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;

		/* GEN4 devices handle AES-GCM via the UCS slice */
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		/*
		 * Zero IV length: use the full J0 block length and stay on
		 * the chained path. Single-pass is used only with a
		 * 12-byte IV on GEN3 and newer devices.
		 */
		if (session->cipher_iv.length == 0) {
			session->cipher_iv.length = AES_GCM_J0_LEN;
			break;
		}
		session->is_iv12B = 1;
		if (qat_dev_gen < QAT_GEN3)
			break;
		qat_sym_session_handle_single_pass(session,
				aead_xform);
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		/* CCM always uses the chained cipher+hash path */
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		/* Chacha-Poly supports only one key size and is always SPC */
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
			return -EINVAL;
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		qat_sym_session_handle_single_pass(session,
						aead_xform);
		break;
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	if (session->is_single_pass) {
		/* SPC needs only the cipher descriptor */
		if (qat_sym_cd_cipher_set(session,
				aead_xform->key.data, aead_xform->key.length))
			return -EINVAL;
	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * Descriptor build order matters in the encrypt direction:
		 * it needs to create cipher desc content first,
		 * then authentication
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * In the decrypt direction it needs to create the
		 * authentication desc content first, then cipher
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	return 0;
}
990
991 unsigned int qat_sym_session_get_private_size(
992                 struct rte_cryptodev *dev __rte_unused)
993 {
994         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
995 }
996
997 /* returns block size in bytes per cipher algo */
998 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
999 {
1000         switch (qat_cipher_alg) {
1001         case ICP_QAT_HW_CIPHER_ALGO_DES:
1002                 return ICP_QAT_HW_DES_BLK_SZ;
1003         case ICP_QAT_HW_CIPHER_ALGO_3DES:
1004                 return ICP_QAT_HW_3DES_BLK_SZ;
1005         case ICP_QAT_HW_CIPHER_ALGO_AES128:
1006         case ICP_QAT_HW_CIPHER_ALGO_AES192:
1007         case ICP_QAT_HW_CIPHER_ALGO_AES256:
1008                 return ICP_QAT_HW_AES_BLK_SZ;
1009         default:
1010                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1011                 return -EFAULT;
1012         };
1013         return -EFAULT;
1014 }
1015
1016 /*
1017  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1018  * This is digest size rounded up to nearest quadword
1019  */
1020 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1021 {
1022         switch (qat_hash_alg) {
1023         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1024                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1025                                                 QAT_HW_DEFAULT_ALIGNMENT);
1026         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1027                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1028                                                 QAT_HW_DEFAULT_ALIGNMENT);
1029         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1030                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1031                                                 QAT_HW_DEFAULT_ALIGNMENT);
1032         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1033                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1034                                                 QAT_HW_DEFAULT_ALIGNMENT);
1035         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1036                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1037                                                 QAT_HW_DEFAULT_ALIGNMENT);
1038         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1039                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1040                                                 QAT_HW_DEFAULT_ALIGNMENT);
1041         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1042         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1043                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1044                                                 QAT_HW_DEFAULT_ALIGNMENT);
1045         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1046                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1047                                                 QAT_HW_DEFAULT_ALIGNMENT);
1048         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1049                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1050                                                 QAT_HW_DEFAULT_ALIGNMENT);
1051         case ICP_QAT_HW_AUTH_ALGO_MD5:
1052                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1053                                                 QAT_HW_DEFAULT_ALIGNMENT);
1054         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1055                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1056                                                 QAT_HW_DEFAULT_ALIGNMENT);
1057         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1058                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1059                                                 QAT_HW_DEFAULT_ALIGNMENT);
1060         case ICP_QAT_HW_AUTH_ALGO_NULL:
1061                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1062                                                 QAT_HW_DEFAULT_ALIGNMENT);
1063         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1064                 /* return maximum state1 size in this case */
1065                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1066                                                 QAT_HW_DEFAULT_ALIGNMENT);
1067         default:
1068                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1069                 return -EFAULT;
1070         };
1071         return -EFAULT;
1072 }
1073
1074 /* returns digest size in bytes  per hash algo */
1075 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1076 {
1077         switch (qat_hash_alg) {
1078         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1079                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1080         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1081                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1082         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1083                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1084         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1085                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1086         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1087                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1088         case ICP_QAT_HW_AUTH_ALGO_MD5:
1089                 return ICP_QAT_HW_MD5_STATE1_SZ;
1090         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1091                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1092         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1093                 /* return maximum digest size in this case */
1094                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1095         default:
1096                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1097                 return -EFAULT;
1098         };
1099         return -EFAULT;
1100 }
1101
/* Returns block size in bytes per hash algo, or -EFAULT if unknown. */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	/* SHA224 shares the SHA256 block size */
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	/* SHA384 shares the SHA512 block size */
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;	/* GHASH operates on 16-byte blocks */
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_BLK_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	/* Unreachable trailing return and stray ';' after the switch
	 * removed ("byes" typo in the old header also fixed). */
}
1131
1132 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1133 {
1134         SHA_CTX ctx;
1135
1136         if (!SHA1_Init(&ctx))
1137                 return -EFAULT;
1138         SHA1_Transform(&ctx, data_in);
1139         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1140         return 0;
1141 }
1142
1143 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1144 {
1145         SHA256_CTX ctx;
1146
1147         if (!SHA224_Init(&ctx))
1148                 return -EFAULT;
1149         SHA256_Transform(&ctx, data_in);
1150         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1151         return 0;
1152 }
1153
1154 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1155 {
1156         SHA256_CTX ctx;
1157
1158         if (!SHA256_Init(&ctx))
1159                 return -EFAULT;
1160         SHA256_Transform(&ctx, data_in);
1161         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1162         return 0;
1163 }
1164
1165 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1166 {
1167         SHA512_CTX ctx;
1168
1169         if (!SHA384_Init(&ctx))
1170                 return -EFAULT;
1171         SHA512_Transform(&ctx, data_in);
1172         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1173         return 0;
1174 }
1175
1176 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1177 {
1178         SHA512_CTX ctx;
1179
1180         if (!SHA512_Init(&ctx))
1181                 return -EFAULT;
1182         SHA512_Transform(&ctx, data_in);
1183         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1184         return 0;
1185 }
1186
1187 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1188 {
1189         MD5_CTX ctx;
1190
1191         if (!MD5_Init(&ctx))
1192                 return -EFAULT;
1193         MD5_Transform(&ctx, data_in);
1194         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1195
1196         return 0;
1197 }
1198
/*
 * Compute the partial hash (internal state after one compression of
 * data_in) for hash_alg and write it to data_out in the word order the
 * QAT firmware expects: 32-bit big-endian words for SHA1/224/256,
 * 64-bit big-endian words for SHA384/512, raw (unswapped) for MD5.
 *
 * Returns 0 on success, -EFAULT on unknown algorithm or OpenSSL failure.
 */
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	/* VLA sized at run time for the largest supported digest
	 * (DELIMITER maps to the SHA512 state size) */
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	/* Initialize to avoid gcc warning */
	memset(digest, 0, sizeof(digest));

	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	/* Two aliases for data_out: 32-bit and 64-bit word views */
	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		/* byte-swap each 32-bit state word to big-endian */
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		/* SHA384/512 state words are 64-bit wide */
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 state is written straight to data_out, no swap */
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
1267 #define HMAC_IPAD_VALUE 0x36
1268 #define HMAC_OPAD_VALUE 0x5c
1269 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1270
1271 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1272
1273 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1274 {
1275         int i;
1276
1277         derived[0] = base[0] << 1;
1278         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1279                 derived[i] = base[i] << 1;
1280                 derived[i - 1] |= base[i] >> 7;
1281         }
1282
1283         if (base[0] & 0x80)
1284                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1285 }
1286
1287 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1288                                 const uint8_t *auth_key,
1289                                 uint16_t auth_keylen,
1290                                 uint8_t *p_state_buf,
1291                                 uint16_t *p_state_len,
1292                                 uint8_t aes_cmac)
1293 {
1294         int block_size;
1295         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1296         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1297         int i;
1298
1299         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1300
1301                 /* CMAC */
1302                 if (aes_cmac) {
1303                         AES_KEY enc_key;
1304                         uint8_t *in = NULL;
1305                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1306                         uint8_t *k1, *k2;
1307
1308                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1309
1310                         in = rte_zmalloc("AES CMAC K1",
1311                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1312
1313                         if (in == NULL) {
1314                                 QAT_LOG(ERR, "Failed to alloc memory");
1315                                 return -ENOMEM;
1316                         }
1317
1318                         rte_memcpy(in, AES_CMAC_SEED,
1319                                    ICP_QAT_HW_AES_128_KEY_SZ);
1320                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1321
1322                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1323                                 &enc_key) != 0) {
1324                                 rte_free(in);
1325                                 return -EFAULT;
1326                         }
1327
1328                         AES_encrypt(in, k0, &enc_key);
1329
1330                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1331                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1332
1333                         aes_cmac_key_derive(k0, k1);
1334                         aes_cmac_key_derive(k1, k2);
1335
1336                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1337                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1338                         rte_free(in);
1339                         return 0;
1340                 } else {
1341                         static uint8_t qat_aes_xcbc_key_seed[
1342                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1343                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1344                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1345                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1346                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1347                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1348                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1349                         };
1350
1351                         uint8_t *in = NULL;
1352                         uint8_t *out = p_state_buf;
1353                         int x;
1354                         AES_KEY enc_key;
1355
1356                         in = rte_zmalloc("working mem for key",
1357                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1358                         if (in == NULL) {
1359                                 QAT_LOG(ERR, "Failed to alloc memory");
1360                                 return -ENOMEM;
1361                         }
1362
1363                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1364                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1365                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1366                                 if (AES_set_encrypt_key(auth_key,
1367                                                         auth_keylen << 3,
1368                                                         &enc_key) != 0) {
1369                                         rte_free(in -
1370                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1371                                         memset(out -
1372                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1373                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1374                                         return -EFAULT;
1375                                 }
1376                                 AES_encrypt(in, out, &enc_key);
1377                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1378                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1379                         }
1380                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1381                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1382                         return 0;
1383                 }
1384
1385         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1386                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1387                 uint8_t *in = NULL;
1388                 uint8_t *out = p_state_buf;
1389                 AES_KEY enc_key;
1390
1391                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1392                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1393                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1394                 in = rte_zmalloc("working mem for key",
1395                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1396                 if (in == NULL) {
1397                         QAT_LOG(ERR, "Failed to alloc memory");
1398                         return -ENOMEM;
1399                 }
1400
1401                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1402                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1403                         &enc_key) != 0) {
1404                         return -EFAULT;
1405                 }
1406                 AES_encrypt(in, out, &enc_key);
1407                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1408                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1409                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1410                 rte_free(in);
1411                 return 0;
1412         }
1413
1414         block_size = qat_hash_get_block_size(hash_alg);
1415         if (block_size < 0)
1416                 return block_size;
1417         /* init ipad and opad from key and xor with fixed values */
1418         memset(ipad, 0, block_size);
1419         memset(opad, 0, block_size);
1420
1421         if (auth_keylen > (unsigned int)block_size) {
1422                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1423                 return -EFAULT;
1424         }
1425         rte_memcpy(ipad, auth_key, auth_keylen);
1426         rte_memcpy(opad, auth_key, auth_keylen);
1427
1428         for (i = 0; i < block_size; i++) {
1429                 uint8_t *ipad_ptr = ipad + i;
1430                 uint8_t *opad_ptr = opad + i;
1431                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1432                 *opad_ptr ^= HMAC_OPAD_VALUE;
1433         }
1434
1435         /* do partial hash of ipad and copy to state1 */
1436         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1437                 memset(ipad, 0, block_size);
1438                 memset(opad, 0, block_size);
1439                 QAT_LOG(ERR, "ipad precompute failed");
1440                 return -EFAULT;
1441         }
1442
1443         /*
1444          * State len is a multiple of 8, so may be larger than the digest.
1445          * Put the partial hash of opad state_len bytes after state1
1446          */
1447         *p_state_len = qat_hash_get_state1_size(hash_alg);
1448         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1449                 memset(ipad, 0, block_size);
1450                 memset(opad, 0, block_size);
1451                 QAT_LOG(ERR, "opad precompute failed");
1452                 return -EFAULT;
1453         }
1454
1455         /*  don't leave data lying around */
1456         memset(ipad, 0, block_size);
1457         memset(opad, 0, block_size);
1458         return 0;
1459 }
1460
1461 static void
1462 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
1463 {
1464         struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1465         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1466         enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
1467         uint32_t slice_flags = session->slice_types;
1468
1469         header->hdr_flags =
1470                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1471         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1472         header->service_cmd_id = session->qat_cmd;
1473         header->comn_req_flags =
1474                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1475                                         QAT_COMN_PTR_TYPE_FLAT);
1476         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1477                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1478         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1479                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1480
1481         switch (proto_flags)            {
1482         case QAT_CRYPTO_PROTO_FLAG_NONE:
1483                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1484                                         ICP_QAT_FW_LA_NO_PROTO);
1485                 break;
1486         case QAT_CRYPTO_PROTO_FLAG_CCM:
1487                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1488                                         ICP_QAT_FW_LA_CCM_PROTO);
1489                 break;
1490         case QAT_CRYPTO_PROTO_FLAG_GCM:
1491                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1492                                         ICP_QAT_FW_LA_GCM_PROTO);
1493                 break;
1494         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1495                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1496                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1497                 break;
1498         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1499                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1500                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1501                 break;
1502         }
1503
1504         /* More than one of the following flags can be set at once */
1505         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
1506                 ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
1507                         header->serv_specif_flags,
1508                         ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
1509         }
1510         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
1511                 ICP_QAT_FW_LA_SLICE_TYPE_SET(
1512                         header->serv_specif_flags,
1513                         ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
1514         }
1515
1516         if (session->is_auth) {
1517                 if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1518                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1519                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1520                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1521                                         ICP_QAT_FW_LA_CMP_AUTH_RES);
1522                 } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
1523                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1524                                                 ICP_QAT_FW_LA_RET_AUTH_RES);
1525                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1526                                                 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1527                 }
1528         } else {
1529                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1530                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1531                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1532                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1533         }
1534
1535         if (session->is_iv12B) {
1536                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1537                         header->serv_specif_flags,
1538                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1539         }
1540
1541         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1542                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1543         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1544                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1545 }
1546
/*
 * Populate the cipher portion of the session content descriptor (CD)
 * and the cipher-related fields of the firmware request template.
 *
 * @cdesc        session being built; qat_cmd, qat_mode, qat_cipher_alg,
 *               qat_dir, is_ucs, is_single_pass etc. must be set already
 * @cipherkey    cipher key supplied by the application
 * @cipherkeylen key length in bytes
 *
 * Return 0 on success, -EFAULT if qat_cmd is not a cipher-capable command.
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/*
	 * cipher_cd_ctrl and hash_cd_ctrl are two views of the same cd_ctrl
	 * memory; which one is meaningful depends on the firmware command.
	 */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	/*
	 * req_ucs (2.0/UCS layout) and req_cipher (legacy layout) overlay
	 * the same service-specific request-parameter area.
	 */
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	/*
	 * Set up the slice chain in the CD control header according to the
	 * firmware command. For HASH_CIPHER the chain is configured by
	 * qat_sym_cd_auth_set(), so only reject non-cipher commands here.
	 */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	/*
	 * Decide whether hardware must convert (expand) the key, and fix up
	 * the cipher direction for streaming modes.
	 */
	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set.
		 * Chacha20-Poly1305 is special case, CTR but single-pass
		 * so both direction need to be used.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
				cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		}
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		/* SNOW3G/ZUC always take the key-convert path */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		/* Remaining case: block-cipher decrypt needs converted key */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/*
	 * Per-algorithm key and cipher-state sizes. The *_sz control fields
	 * are expressed in quad-words (hence the >> 3 shifts below).
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		/* Default (AES family etc.): key used as supplied */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	}
	/* Offset of the cipher config within the CD, in quad-words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	/* cipher and cipher20 are alternate layouts of the same CD area */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI stores the key twice: plain, then XORed with the
		 * F8 modifier; cd_cur_ptr advances past both copies.
		 */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		/* UCS slice: key area padded up to a multiple of the AES-128
		 * key size; reserved words of the 2.0 config are cleared.
		 */
		const uint8_t *final_key = cipherkey;

		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (cdesc->is_single_pass) {
		/* Single-pass AEAD: digest length lives in the first config
		 * word, AAD length in the second.
		 */
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
					cdesc->aad_len);
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
	}

	/* Pad the key area out to total_key_size; 3DES short-key options
	 * replicate K1 into the missing key parts instead of zero-padding.
	 */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	if (cdesc->is_ucs) {
		/*
		 * These values match in terms of position auth
		 * slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
		}
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;
	}
	/* Final CD size and key size in quad-words */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1747
1748 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1749                                                 const uint8_t *authkey,
1750                                                 uint32_t authkeylen,
1751                                                 uint32_t aad_length,
1752                                                 uint32_t digestsize,
1753                                                 unsigned int operation)
1754 {
1755         struct icp_qat_hw_auth_setup *hash;
1756         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1757         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1758         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1759         void *ptr = &req_tmpl->cd_ctrl;
1760         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1761         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1762         struct icp_qat_fw_la_auth_req_params *auth_param =
1763                 (struct icp_qat_fw_la_auth_req_params *)
1764                 ((char *)&req_tmpl->serv_specif_rqpars +
1765                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1766         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1767         uint16_t hash_offset, cd_size;
1768         uint32_t *aad_len = NULL;
1769         uint32_t wordIndex  = 0;
1770         uint32_t *pTempKey;
1771
1772         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1773                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1774                                         ICP_QAT_FW_SLICE_AUTH);
1775                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1776                                         ICP_QAT_FW_SLICE_DRAM_WR);
1777                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1778         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1779                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1780                                 ICP_QAT_FW_SLICE_AUTH);
1781                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1782                                 ICP_QAT_FW_SLICE_CIPHER);
1783                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1784                                 ICP_QAT_FW_SLICE_CIPHER);
1785                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1786                                 ICP_QAT_FW_SLICE_DRAM_WR);
1787                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1788         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1789                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1790                 return -EFAULT;
1791         }
1792
1793         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1794                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1795         else
1796                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1797
1798         /*
1799          * Setup the inner hash config
1800          */
1801         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1802         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1803         hash->auth_config.reserved = 0;
1804         hash->auth_config.config =
1805                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1806                                 cdesc->qat_hash_alg, digestsize);
1807
1808         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1809                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1810                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1811                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1812                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1813                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1814                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1815                         )
1816                 hash->auth_counter.counter = 0;
1817         else {
1818                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1819
1820                 if (block_size < 0)
1821                         return block_size;
1822                 hash->auth_counter.counter = rte_bswap32(block_size);
1823         }
1824
1825         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1826
1827         /*
1828          * cd_cur_ptr now points at the state1 information.
1829          */
1830         switch (cdesc->qat_hash_alg) {
1831         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1832                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1833                         /* Plain SHA-1 */
1834                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1835                                         sizeof(sha1InitialState));
1836                         state1_size = qat_hash_get_state1_size(
1837                                         cdesc->qat_hash_alg);
1838                         break;
1839                 }
1840                 /* SHA-1 HMAC */
1841                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1842                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1843                         cdesc->aes_cmac)) {
1844                         QAT_LOG(ERR, "(SHA)precompute failed");
1845                         return -EFAULT;
1846                 }
1847                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1848                 break;
1849         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1850                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1851                         /* Plain SHA-224 */
1852                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1853                                         sizeof(sha224InitialState));
1854                         state1_size = qat_hash_get_state1_size(
1855                                         cdesc->qat_hash_alg);
1856                         break;
1857                 }
1858                 /* SHA-224 HMAC */
1859                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1860                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1861                         cdesc->aes_cmac)) {
1862                         QAT_LOG(ERR, "(SHA)precompute failed");
1863                         return -EFAULT;
1864                 }
1865                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1866                 break;
1867         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1868                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1869                         /* Plain SHA-256 */
1870                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1871                                         sizeof(sha256InitialState));
1872                         state1_size = qat_hash_get_state1_size(
1873                                         cdesc->qat_hash_alg);
1874                         break;
1875                 }
1876                 /* SHA-256 HMAC */
1877                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1878                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1879                         cdesc->aes_cmac)) {
1880                         QAT_LOG(ERR, "(SHA)precompute failed");
1881                         return -EFAULT;
1882                 }
1883                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1884                 break;
1885         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1886                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1887                         /* Plain SHA-384 */
1888                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1889                                         sizeof(sha384InitialState));
1890                         state1_size = qat_hash_get_state1_size(
1891                                         cdesc->qat_hash_alg);
1892                         break;
1893                 }
1894                 /* SHA-384 HMAC */
1895                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1896                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1897                         cdesc->aes_cmac)) {
1898                         QAT_LOG(ERR, "(SHA)precompute failed");
1899                         return -EFAULT;
1900                 }
1901                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1902                 break;
1903         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1904                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1905                         /* Plain SHA-512 */
1906                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1907                                         sizeof(sha512InitialState));
1908                         state1_size = qat_hash_get_state1_size(
1909                                         cdesc->qat_hash_alg);
1910                         break;
1911                 }
1912                 /* SHA-512 HMAC */
1913                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1914                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1915                         cdesc->aes_cmac)) {
1916                         QAT_LOG(ERR, "(SHA)precompute failed");
1917                         return -EFAULT;
1918                 }
1919                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1920                 break;
1921         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1922                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1923
1924                 if (cdesc->aes_cmac)
1925                         memset(cdesc->cd_cur_ptr, 0, state1_size);
1926                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1927                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1928                         &state2_size, cdesc->aes_cmac)) {
1929                         cdesc->aes_cmac ? QAT_LOG(ERR,
1930                                                   "(CMAC)precompute failed")
1931                                         : QAT_LOG(ERR,
1932                                                   "(XCBC)precompute failed");
1933                         return -EFAULT;
1934                 }
1935                 break;
1936         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1937         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1938                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1939                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1940                 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1941                         authkeylen, cdesc->cd_cur_ptr + state1_size,
1942                         &state2_size, cdesc->aes_cmac)) {
1943                         QAT_LOG(ERR, "(GCM)precompute failed");
1944                         return -EFAULT;
1945                 }
1946                 /*
1947                  * Write (the length of AAD) into bytes 16-19 of state2
1948                  * in big-endian format. This field is 8 bytes
1949                  */
1950                 auth_param->u2.aad_sz =
1951                                 RTE_ALIGN_CEIL(aad_length, 16);
1952                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1953
1954                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1955                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1956                                         ICP_QAT_HW_GALOIS_H_SZ);
1957                 *aad_len = rte_bswap32(aad_length);
1958                 cdesc->aad_len = aad_length;
1959                 break;
1960         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1961                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1962                 state1_size = qat_hash_get_state1_size(
1963                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1964                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1965                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1966
1967                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1968                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
1969                 cipherconfig->cipher_config.val =
1970                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1971                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1972                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
1973                         ICP_QAT_HW_CIPHER_ENCRYPT);
1974                 memcpy(cipherconfig->key, authkey, authkeylen);
1975                 memset(cipherconfig->key + authkeylen,
1976                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1977                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1978                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1979                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1980                 break;
1981         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1982                 hash->auth_config.config =
1983                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1984                                 cdesc->qat_hash_alg, digestsize);
1985                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1986                 state1_size = qat_hash_get_state1_size(
1987                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1988                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1989                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1990                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1991
1992                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1993                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1994                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1995                 cdesc->min_qat_dev_gen = QAT_GEN2;
1996
1997                 break;
1998         case ICP_QAT_HW_AUTH_ALGO_MD5:
1999                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
2000                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
2001                         cdesc->aes_cmac)) {
2002                         QAT_LOG(ERR, "(MD5)precompute failed");
2003                         return -EFAULT;
2004                 }
2005                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
2006                 break;
2007         case ICP_QAT_HW_AUTH_ALGO_NULL:
2008                 state1_size = qat_hash_get_state1_size(
2009                                 ICP_QAT_HW_AUTH_ALGO_NULL);
2010                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
2011                 break;
2012         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
2013                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
2014                 state1_size = qat_hash_get_state1_size(
2015                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
2016                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
2017                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
2018
2019                 if (aad_length > 0) {
2020                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2021                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
2022                         auth_param->u2.aad_sz =
2023                         RTE_ALIGN_CEIL(aad_length,
2024                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2025                 } else {
2026                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2027                 }
2028                 cdesc->aad_len = aad_length;
2029                 hash->auth_counter.counter = 0;
2030
2031                 hash_cd_ctrl->outer_prefix_sz = digestsize;
2032                 auth_param->hash_state_sz = digestsize;
2033
2034                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2035                 break;
2036         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2037                 state1_size = qat_hash_get_state1_size(
2038                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2039                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2040                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2041                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2042                                                         + authkeylen);
2043                 /*
2044                 * The Inner Hash Initial State2 block must contain IK
2045                 * (Initialisation Key), followed by IK XOR-ed with KM
2046                 * (Key Modifier): IK||(IK^KM).
2047                 */
2048                 /* write the auth key */
2049                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2050                 /* initialise temp key with auth key */
2051                 memcpy(pTempKey, authkey, authkeylen);
2052                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2053                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2054                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2055                 break;
2056         default:
2057                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2058                 return -EFAULT;
2059         }
2060
2061         /* Auth CD config setup */
2062         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2063         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2064         hash_cd_ctrl->inner_res_sz = digestsize;
2065         hash_cd_ctrl->final_sz = digestsize;
2066         hash_cd_ctrl->inner_state1_sz = state1_size;
2067         auth_param->auth_res_sz = digestsize;
2068
2069         hash_cd_ctrl->inner_state2_sz  = state2_size;
2070         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2071                         ((sizeof(struct icp_qat_hw_auth_setup) +
2072                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2073                                         >> 3);
2074
2075         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2076         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2077
2078         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2079         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2080
2081         return 0;
2082 }
2083
2084 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2085 {
2086         switch (key_len) {
2087         case ICP_QAT_HW_AES_128_KEY_SZ:
2088                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2089                 break;
2090         case ICP_QAT_HW_AES_192_KEY_SZ:
2091                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2092                 break;
2093         case ICP_QAT_HW_AES_256_KEY_SZ:
2094                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2095                 break;
2096         default:
2097                 return -EINVAL;
2098         }
2099         return 0;
2100 }
2101
2102 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2103                 enum icp_qat_hw_cipher_algo *alg)
2104 {
2105         switch (key_len) {
2106         case ICP_QAT_HW_AES_128_KEY_SZ:
2107                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2108                 break;
2109         case ICP_QAT_HW_AES_256_KEY_SZ:
2110                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2111                 break;
2112         default:
2113                 return -EINVAL;
2114         }
2115         return 0;
2116 }
2117
2118 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2119 {
2120         switch (key_len) {
2121         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2122                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2123                 break;
2124         default:
2125                 return -EINVAL;
2126         }
2127         return 0;
2128 }
2129
2130 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2131 {
2132         switch (key_len) {
2133         case ICP_QAT_HW_KASUMI_KEY_SZ:
2134                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2135                 break;
2136         default:
2137                 return -EINVAL;
2138         }
2139         return 0;
2140 }
2141
2142 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2143 {
2144         switch (key_len) {
2145         case ICP_QAT_HW_DES_KEY_SZ:
2146                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2147                 break;
2148         default:
2149                 return -EINVAL;
2150         }
2151         return 0;
2152 }
2153
2154 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2155 {
2156         switch (key_len) {
2157         case QAT_3DES_KEY_SZ_OPT1:
2158         case QAT_3DES_KEY_SZ_OPT2:
2159         case QAT_3DES_KEY_SZ_OPT3:
2160                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2161                 break;
2162         default:
2163                 return -EINVAL;
2164         }
2165         return 0;
2166 }
2167
2168 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2169 {
2170         switch (key_len) {
2171         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2172                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2173                 break;
2174         default:
2175                 return -EINVAL;
2176         }
2177         return 0;
2178 }
2179
2180 #ifdef RTE_LIB_SECURITY
2181 static int
2182 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2183 {
2184         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2185         struct rte_security_docsis_xform *docsis = &conf->docsis;
2186
2187         /* CRC generate -> Cipher encrypt */
2188         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2189
2190                 if (crypto_sym != NULL &&
2191                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2192                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2193                     crypto_sym->cipher.algo ==
2194                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2195                     (crypto_sym->cipher.key.length ==
2196                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2197                      crypto_sym->cipher.key.length ==
2198                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2199                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2200                     crypto_sym->next == NULL) {
2201                         return 0;
2202                 }
2203         /* Cipher decrypt -> CRC verify */
2204         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2205
2206                 if (crypto_sym != NULL &&
2207                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2208                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2209                     crypto_sym->cipher.algo ==
2210                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2211                     (crypto_sym->cipher.key.length ==
2212                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2213                      crypto_sym->cipher.key.length ==
2214                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2215                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2216                     crypto_sym->next == NULL) {
2217                         return 0;
2218                 }
2219         }
2220
2221         return -EINVAL;
2222 }
2223
2224 static int
2225 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2226                 struct rte_security_session_conf *conf, void *session_private)
2227 {
2228         int ret;
2229         int qat_cmd_id;
2230         struct rte_crypto_sym_xform *xform = NULL;
2231         struct qat_sym_session *session = session_private;
2232
2233         /* Clear the session */
2234         memset(session, 0, qat_sym_session_get_private_size(dev));
2235
2236         ret = qat_sec_session_check_docsis(conf);
2237         if (ret) {
2238                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2239                 return ret;
2240         }
2241
2242         xform = conf->crypto_xform;
2243
2244         /* Verify the session physical address is known */
2245         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2246         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2247                 QAT_LOG(ERR,
2248                         "Session physical address unknown. Bad memory pool.");
2249                 return -EINVAL;
2250         }
2251
2252         /* Set context descriptor physical address */
2253         session->cd_paddr = session_paddr +
2254                         offsetof(struct qat_sym_session, cd);
2255
2256         session->min_qat_dev_gen = QAT_GEN1;
2257
2258         /* Get requested QAT command id - should be cipher */
2259         qat_cmd_id = qat_get_cmd_id(xform);
2260         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2261                 QAT_LOG(ERR, "Unsupported xform chain requested");
2262                 return -ENOTSUP;
2263         }
2264         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2265
2266         ret = qat_sym_session_configure_cipher(dev, xform, session);
2267         if (ret < 0)
2268                 return ret;
2269         qat_sym_session_finalize(session);
2270
2271         return 0;
2272 }
2273
2274 int
2275 qat_security_session_create(void *dev,
2276                                 struct rte_security_session_conf *conf,
2277                                 struct rte_security_session *sess,
2278                                 struct rte_mempool *mempool)
2279 {
2280         void *sess_private_data;
2281         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2282         int ret;
2283
2284         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2285                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2286                 QAT_LOG(ERR, "Invalid security protocol");
2287                 return -EINVAL;
2288         }
2289
2290         if (rte_mempool_get(mempool, &sess_private_data)) {
2291                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2292                 return -ENOMEM;
2293         }
2294
2295         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2296                         sess_private_data);
2297         if (ret != 0) {
2298                 QAT_LOG(ERR, "Failed to configure session parameters");
2299                 /* Return session to mempool */
2300                 rte_mempool_put(mempool, sess_private_data);
2301                 return ret;
2302         }
2303
2304         set_sec_session_private_data(sess, sess_private_data);
2305
2306         return ret;
2307 }
2308
2309 int
2310 qat_security_session_destroy(void *dev __rte_unused,
2311                                  struct rte_security_session *sess)
2312 {
2313         void *sess_priv = get_sec_session_private_data(sess);
2314         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2315
2316         if (sess_priv) {
2317                 if (s->bpi_ctx)
2318                         bpi_cipher_ctx_free(s->bpi_ctx);
2319                 memset(s, 0, qat_sym_session_get_private_size(dev));
2320                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2321
2322                 set_sec_session_private_data(sess, NULL);
2323                 rte_mempool_put(sess_mp, sess_priv);
2324         }
2325         return 0;
2326 }
2327 #endif