net/ngbe: support MTU set
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2019 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
24
/* Initial hash state vectors (big-endian words serialized as bytes).
 * These are loaded when building auth precomputes; values come straight
 * from FIPS 180-2. Lengths match each algorithm's state size.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
59
60 static int
61 qat_sym_cd_cipher_set(struct qat_sym_session *cd,
62                                                 const uint8_t *enckey,
63                                                 uint32_t enckeylen);
64
65 static int
66 qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
67                                                 const uint8_t *authkey,
68                                                 uint32_t authkeylen,
69                                                 uint32_t aad_length,
70                                                 uint32_t digestsize,
71                                                 unsigned int operation);
72 static void
73 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
74
75 /* Req/cd init functions */
76
static void
qat_sym_session_finalize(struct qat_sym_session *session)
{
	/* Build the common firmware request header; called once all
	 * cipher/auth session fields have been populated.
	 */
	qat_sym_session_init_common_hdr(session);
}
82
83 /** Frees a context previously created
84  *  Depends on openssl libcrypto
85  */
86 static void
87 bpi_cipher_ctx_free(void *bpi_ctx)
88 {
89         if (bpi_ctx != NULL)
90                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
91 }
92
93 /** Creates a context in either AES or DES in ECB mode
94  *  Depends on openssl libcrypto
95  */
96 static int
97 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
98                 enum rte_crypto_cipher_operation direction __rte_unused,
99                 const uint8_t *key, uint16_t key_length, void **ctx)
100 {
101         const EVP_CIPHER *algo = NULL;
102         int ret;
103         *ctx = EVP_CIPHER_CTX_new();
104
105         if (*ctx == NULL) {
106                 ret = -ENOMEM;
107                 goto ctx_init_err;
108         }
109
110         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
111                 algo = EVP_des_ecb();
112         else
113                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
114                         algo = EVP_aes_128_ecb();
115                 else
116                         algo = EVP_aes_256_ecb();
117
118         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
119         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
120                 ret = -EINVAL;
121                 goto ctx_init_err;
122         }
123
124         return 0;
125
126 ctx_init_err:
127         if (*ctx != NULL)
128                 EVP_CIPHER_CTX_free(*ctx);
129         return ret;
130 }
131
132 static int
133 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
134                 struct qat_sym_dev_private *internals)
135 {
136         int i = 0;
137         const struct rte_cryptodev_capabilities *capability;
138
139         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
140                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
141                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
142                         continue;
143
144                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
145                         continue;
146
147                 if (capability->sym.cipher.algo == algo)
148                         return 1;
149         }
150         return 0;
151 }
152
153 static int
154 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
155                 struct qat_sym_dev_private *internals)
156 {
157         int i = 0;
158         const struct rte_cryptodev_capabilities *capability;
159
160         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
161                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
162                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
163                         continue;
164
165                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
166                         continue;
167
168                 if (capability->sym.auth.algo == algo)
169                         return 1;
170         }
171         return 0;
172 }
173
174 void
175 qat_sym_session_clear(struct rte_cryptodev *dev,
176                 struct rte_cryptodev_sym_session *sess)
177 {
178         uint8_t index = dev->driver_id;
179         void *sess_priv = get_sym_session_private_data(sess, index);
180         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
181
182         if (sess_priv) {
183                 if (s->bpi_ctx)
184                         bpi_cipher_ctx_free(s->bpi_ctx);
185                 memset(s, 0, qat_sym_session_get_private_size(dev));
186                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
187
188                 set_sym_session_private_data(sess, index, NULL);
189                 rte_mempool_put(sess_mp, sess_priv);
190         }
191 }
192
193 static int
194 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
195 {
196         /* Cipher Only */
197         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
198                 return ICP_QAT_FW_LA_CMD_CIPHER;
199
200         /* Authentication Only */
201         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
202                 return ICP_QAT_FW_LA_CMD_AUTH;
203
204         /* AEAD */
205         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
206                 /* AES-GCM and AES-CCM works with different direction
207                  * GCM first encrypts and generate hash where AES-CCM
208                  * first generate hash and encrypts. Similar relation
209                  * applies to decryption.
210                  */
211                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
212                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
213                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
214                         else
215                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
216                 else
217                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
218                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
219                         else
220                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
221         }
222
223         if (xform->next == NULL)
224                 return -1;
225
226         /* Cipher then Authenticate */
227         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
228                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
229                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
230
231         /* Authenticate then Cipher */
232         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
233                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
235
236         return -1;
237 }
238
239 static struct rte_crypto_auth_xform *
240 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
241 {
242         do {
243                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
244                         return &xform->auth;
245
246                 xform = xform->next;
247         } while (xform);
248
249         return NULL;
250 }
251
252 static struct rte_crypto_cipher_xform *
253 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
254 {
255         do {
256                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
257                         return &xform->cipher;
258
259                 xform = xform->next;
260         } while (xform);
261
262         return NULL;
263 }
264
265 int
266 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
267                 struct rte_crypto_sym_xform *xform,
268                 struct qat_sym_session *session)
269 {
270         struct qat_sym_dev_private *internals = dev->data->dev_private;
271         struct rte_crypto_cipher_xform *cipher_xform = NULL;
272         enum qat_device_gen qat_dev_gen =
273                                 internals->qat_dev->qat_dev_gen;
274         int ret;
275
276         /* Get cipher xform from crypto xform chain */
277         cipher_xform = qat_get_cipher_xform(xform);
278
279         session->cipher_iv.offset = cipher_xform->iv.offset;
280         session->cipher_iv.length = cipher_xform->iv.length;
281
282         switch (cipher_xform->algo) {
283         case RTE_CRYPTO_CIPHER_AES_CBC:
284                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
285                                 &session->qat_cipher_alg) != 0) {
286                         QAT_LOG(ERR, "Invalid AES cipher key size");
287                         ret = -EINVAL;
288                         goto error_out;
289                 }
290                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
291                 break;
292         case RTE_CRYPTO_CIPHER_AES_CTR:
293                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
294                                 &session->qat_cipher_alg) != 0) {
295                         QAT_LOG(ERR, "Invalid AES cipher key size");
296                         ret = -EINVAL;
297                         goto error_out;
298                 }
299                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
300                 if (qat_dev_gen == QAT_GEN4)
301                         session->is_ucs = 1;
302                 break;
303         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
304                 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
305                                         &session->qat_cipher_alg) != 0) {
306                         QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
307                         ret = -EINVAL;
308                         goto error_out;
309                 }
310                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
311                 break;
312         case RTE_CRYPTO_CIPHER_NULL:
313                 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
314                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
315                 break;
316         case RTE_CRYPTO_CIPHER_KASUMI_F8:
317                 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
318                                         &session->qat_cipher_alg) != 0) {
319                         QAT_LOG(ERR, "Invalid KASUMI cipher key size");
320                         ret = -EINVAL;
321                         goto error_out;
322                 }
323                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
324                 break;
325         case RTE_CRYPTO_CIPHER_3DES_CBC:
326                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
327                                 &session->qat_cipher_alg) != 0) {
328                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
329                         ret = -EINVAL;
330                         goto error_out;
331                 }
332                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
333                 break;
334         case RTE_CRYPTO_CIPHER_DES_CBC:
335                 if (qat_sym_validate_des_key(cipher_xform->key.length,
336                                 &session->qat_cipher_alg) != 0) {
337                         QAT_LOG(ERR, "Invalid DES cipher key size");
338                         ret = -EINVAL;
339                         goto error_out;
340                 }
341                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
342                 break;
343         case RTE_CRYPTO_CIPHER_3DES_CTR:
344                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
345                                 &session->qat_cipher_alg) != 0) {
346                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
347                         ret = -EINVAL;
348                         goto error_out;
349                 }
350                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
351                 break;
352         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
353                 ret = bpi_cipher_ctx_init(
354                                         cipher_xform->algo,
355                                         cipher_xform->op,
356                                         cipher_xform->key.data,
357                                         cipher_xform->key.length,
358                                         &session->bpi_ctx);
359                 if (ret != 0) {
360                         QAT_LOG(ERR, "failed to create DES BPI ctx");
361                         goto error_out;
362                 }
363                 if (qat_sym_validate_des_key(cipher_xform->key.length,
364                                 &session->qat_cipher_alg) != 0) {
365                         QAT_LOG(ERR, "Invalid DES cipher key size");
366                         ret = -EINVAL;
367                         goto error_out;
368                 }
369                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
370                 break;
371         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
372                 ret = bpi_cipher_ctx_init(
373                                         cipher_xform->algo,
374                                         cipher_xform->op,
375                                         cipher_xform->key.data,
376                                         cipher_xform->key.length,
377                                         &session->bpi_ctx);
378                 if (ret != 0) {
379                         QAT_LOG(ERR, "failed to create AES BPI ctx");
380                         goto error_out;
381                 }
382                 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
383                                 &session->qat_cipher_alg) != 0) {
384                         QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
385                         ret = -EINVAL;
386                         goto error_out;
387                 }
388                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
389                 break;
390         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
391                 if (!qat_is_cipher_alg_supported(
392                         cipher_xform->algo, internals)) {
393                         QAT_LOG(ERR, "%s not supported on this device",
394                                 rte_crypto_cipher_algorithm_strings
395                                         [cipher_xform->algo]);
396                         ret = -ENOTSUP;
397                         goto error_out;
398                 }
399                 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
400                                 &session->qat_cipher_alg) != 0) {
401                         QAT_LOG(ERR, "Invalid ZUC cipher key size");
402                         ret = -EINVAL;
403                         goto error_out;
404                 }
405                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
406                 break;
407         case RTE_CRYPTO_CIPHER_AES_XTS:
408                 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
409                         QAT_LOG(ERR, "AES-XTS-192 not supported");
410                         ret = -EINVAL;
411                         goto error_out;
412                 }
413                 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
414                                 &session->qat_cipher_alg) != 0) {
415                         QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
416                         ret = -EINVAL;
417                         goto error_out;
418                 }
419                 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
420                 break;
421         case RTE_CRYPTO_CIPHER_3DES_ECB:
422         case RTE_CRYPTO_CIPHER_AES_ECB:
423         case RTE_CRYPTO_CIPHER_AES_F8:
424         case RTE_CRYPTO_CIPHER_ARC4:
425                 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
426                                 cipher_xform->algo);
427                 ret = -ENOTSUP;
428                 goto error_out;
429         default:
430                 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
431                                 cipher_xform->algo);
432                 ret = -EINVAL;
433                 goto error_out;
434         }
435
436         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
437                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
438         else
439                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
440
441         if (qat_sym_cd_cipher_set(session,
442                                                 cipher_xform->key.data,
443                                                 cipher_xform->key.length)) {
444                 ret = -EINVAL;
445                 goto error_out;
446         }
447
448         return 0;
449
450 error_out:
451         if (session->bpi_ctx) {
452                 bpi_cipher_ctx_free(session->bpi_ctx);
453                 session->bpi_ctx = NULL;
454         }
455         return ret;
456 }
457
458 int
459 qat_sym_session_configure(struct rte_cryptodev *dev,
460                 struct rte_crypto_sym_xform *xform,
461                 struct rte_cryptodev_sym_session *sess,
462                 struct rte_mempool *mempool)
463 {
464         void *sess_private_data;
465         int ret;
466
467         if (rte_mempool_get(mempool, &sess_private_data)) {
468                 CDEV_LOG_ERR(
469                         "Couldn't get object from session mempool");
470                 return -ENOMEM;
471         }
472
473         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
474         if (ret != 0) {
475                 QAT_LOG(ERR,
476                     "Crypto QAT PMD: failed to configure session parameters");
477
478                 /* Return session to mempool */
479                 rte_mempool_put(mempool, sess_private_data);
480                 return ret;
481         }
482
483         set_sym_session_private_data(sess, dev->driver_id,
484                 sess_private_data);
485
486         return 0;
487 }
488
/* Enable the extended-protocol flag path in the firmware request and set
 * per-algorithm hash/proto flags. Used for mixed wireless algorithm
 * combinations (SNOW 3G / ZUC cipher paired with a different hash).
 *
 * @param session session whose prebuilt fw_req header is patched in place
 * @param hash_flag hash flag bits to OR into content-descriptor LW 28
 */
static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1: the proto field and the ZUC flag are
	 * mutually exclusive, so the inactive one is always cleared.
	 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}
530
/* Apply extended hash flags for "mixed" wireless combinations, i.e. a
 * SNOW 3G / ZUC hash paired with a different cipher (or vice versa with
 * AES-CMAC / NULL hash). Devices advertising QAT_SYM_CAP_MIXED_CRYPTO
 * support this from GEN2, others only from GEN3, so the session's
 * minimum device generation is raised accordingly.
 */
static void
qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
		struct qat_sym_session *session)
{
	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;

	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		/* ZUC hash with a non-ZUC cipher */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		/* SNOW 3G hash with a non-SNOW 3G cipher */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
	} else if ((session->aes_cmac ||
			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
			(session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
		/* AES-CMAC or NULL hash with a wireless cipher: extended
		 * flags needed but no extra hash flag bits.
		 */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session, 0);
	}
}
561
562 int
563 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
564                 struct rte_crypto_sym_xform *xform, void *session_private)
565 {
566         struct qat_sym_session *session = session_private;
567         struct qat_sym_dev_private *internals = dev->data->dev_private;
568         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
569         int ret;
570         int qat_cmd_id;
571         int handle_mixed = 0;
572
573         /* Verify the session physical address is known */
574         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
575         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
576                 QAT_LOG(ERR,
577                         "Session physical address unknown. Bad memory pool.");
578                 return -EINVAL;
579         }
580
581         memset(session, 0, sizeof(*session));
582         /* Set context descriptor physical address */
583         session->cd_paddr = session_paddr +
584                         offsetof(struct qat_sym_session, cd);
585
586         session->min_qat_dev_gen = QAT_GEN1;
587         session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
588         session->is_ucs = 0;
589
590         /* Get requested QAT command id */
591         qat_cmd_id = qat_get_cmd_id(xform);
592         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
593                 QAT_LOG(ERR, "Unsupported xform chain requested");
594                 return -ENOTSUP;
595         }
596         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
597         switch (session->qat_cmd) {
598         case ICP_QAT_FW_LA_CMD_CIPHER:
599                 ret = qat_sym_session_configure_cipher(dev, xform, session);
600                 if (ret < 0)
601                         return ret;
602                 break;
603         case ICP_QAT_FW_LA_CMD_AUTH:
604                 ret = qat_sym_session_configure_auth(dev, xform, session);
605                 if (ret < 0)
606                         return ret;
607                 session->is_single_pass_gmac =
608                                qat_dev_gen == QAT_GEN3 &&
609                                xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
610                                xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
611                 break;
612         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
613                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
614                         ret = qat_sym_session_configure_aead(dev, xform,
615                                         session);
616                         if (ret < 0)
617                                 return ret;
618                 } else {
619                         ret = qat_sym_session_configure_cipher(dev,
620                                         xform, session);
621                         if (ret < 0)
622                                 return ret;
623                         ret = qat_sym_session_configure_auth(dev,
624                                         xform, session);
625                         if (ret < 0)
626                                 return ret;
627                         handle_mixed = 1;
628                 }
629                 break;
630         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
631                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
632                         ret = qat_sym_session_configure_aead(dev, xform,
633                                         session);
634                         if (ret < 0)
635                                 return ret;
636                 } else {
637                         ret = qat_sym_session_configure_auth(dev,
638                                         xform, session);
639                         if (ret < 0)
640                                 return ret;
641                         ret = qat_sym_session_configure_cipher(dev,
642                                         xform, session);
643                         if (ret < 0)
644                                 return ret;
645                         handle_mixed = 1;
646                 }
647                 break;
648         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
649         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
650         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
651         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
652         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
653         case ICP_QAT_FW_LA_CMD_MGF1:
654         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
655         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
656         case ICP_QAT_FW_LA_CMD_DELIMITER:
657         QAT_LOG(ERR, "Unsupported Service %u",
658                 session->qat_cmd);
659                 return -ENOTSUP;
660         default:
661         QAT_LOG(ERR, "Unsupported Service %u",
662                 session->qat_cmd);
663                 return -ENOTSUP;
664         }
665         qat_sym_session_finalize(session);
666         if (handle_mixed) {
667                 /* Special handling of mixed hash+cipher algorithms */
668                 qat_sym_session_handle_mixed(dev, session);
669         }
670
671         return 0;
672 }
673
674 static int
675 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
676                 const struct rte_crypto_aead_xform *aead_xform)
677 {
678         session->is_single_pass = 1;
679         session->is_auth = 1;
680         session->min_qat_dev_gen = QAT_GEN3;
681         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
682         /* Chacha-Poly is special case that use QAT CTR mode */
683         if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
684                 session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
685         } else {
686                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
687         }
688         session->cipher_iv.offset = aead_xform->iv.offset;
689         session->cipher_iv.length = aead_xform->iv.length;
690         session->aad_len = aead_xform->aad_length;
691         session->digest_length = aead_xform->digest_length;
692
693         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
694                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
695                 session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
696         } else {
697                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
698                 session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
699         }
700
701         return 0;
702 }
703
/**
 * Configure a QAT symmetric session for an authentication-only transform
 * (plain hash, HMAC, XCBC/CMAC, GMAC, SNOW3G/KASUMI/ZUC integrity).
 *
 * @param dev      Crypto device the session is being created on.
 * @param xform    Transform chain; the auth xform is extracted from it.
 * @param session  QAT session to populate.
 * @return 0 on success, -EINVAL on invalid parameters or content-descriptor
 *         build failure, -ENOTSUP when the algorithm is not available on
 *         this device.
 */
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	/* MODE1 (keyed) is the default; plain digests override to MODE0. */
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	/* Plain (unkeyed) SHA variants run the hash engine in MODE0. */
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	/* HMAC variants keep the default MODE1 (keyed) setting. */
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	/*
	 * CMAC reuses the XCBC-MAC hardware algo ID; the aes_cmac flag
	 * selects the CMAC subkey derivation during precompute.
	 */
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/*
		 * A zero-length IV means the caller supplies a full 16-byte
		 * J0 block; otherwise the standard 12-byte GCM IV is used.
		 */
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
		else
			session->is_iv12B = 1;
		if (qat_dev_gen == QAT_GEN4) {
			session->is_cnt_zero = 1;
			session->is_ucs = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	/* ZUC is only present on some devices - check capabilities first. */
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	/* Known algorithms with no QAT support. */
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	/*
	 * GMAC is implemented as a cipher+hash chain, so both cipher and
	 * auth content-descriptor parts must be built; the build order
	 * depends on whether we generate or verify the tag.
	 */
	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		session->is_gmac = 1;
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */
			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_cd_auth_set(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */

			if (qat_sym_cd_auth_set(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
	} else {
		/* Hash-only session: build just the auth part of the CD. */
		if (qat_sym_cd_auth_set(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	return 0;
}
870
/**
 * Configure a QAT symmetric session for an AEAD transform
 * (AES-GCM, AES-CCM or ChaCha20-Poly1305).
 *
 * @param dev      Crypto device the session is being created on.
 * @param xform    Transform holding the AEAD parameters.
 * @param session  QAT session to populate.
 * @return 0 on success, -EINVAL on invalid key size/algorithm or
 *         content-descriptor build failure.
 */
int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_sym_dev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = aead_xform->digest_length;

	/* May be switched on by qat_sym_session_handle_single_pass() below. */
	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;

		/* GEN4 devices use the unified crypto slice (UCS). */
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		/*
		 * A zero-length IV means the caller supplies a full 16-byte
		 * J0 block; such sessions cannot use the single-pass path.
		 */
		if (session->cipher_iv.length == 0) {
			session->cipher_iv.length = AES_GCM_J0_LEN;
			break;
		}
		session->is_iv12B = 1;
		/* Single-pass GCM is only available from GEN3 onwards. */
		if (qat_dev_gen < QAT_GEN3)
			break;
		qat_sym_session_handle_single_pass(session,
				aead_xform);
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	/* ChaCha20-Poly1305 is always handled as a single-pass session. */
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
			return -EINVAL;
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		qat_sym_session_handle_single_pass(session,
						aead_xform);
		break;
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	if (session->is_single_pass) {
		/* Single-pass: the cipher slice handles auth too. */
		if (qat_sym_cd_cipher_set(session,
				aead_xform->key.data, aead_xform->key.length))
			return -EINVAL;
	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * It needs to create cipher desc content first,
		 * then authentication
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * It needs to create authentication desc content first,
		 * then cipher
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	return 0;
}
998
999 unsigned int qat_sym_session_get_private_size(
1000                 struct rte_cryptodev *dev __rte_unused)
1001 {
1002         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
1003 }
1004
1005 /* returns block size in bytes per cipher algo */
1006 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1007 {
1008         switch (qat_cipher_alg) {
1009         case ICP_QAT_HW_CIPHER_ALGO_DES:
1010                 return ICP_QAT_HW_DES_BLK_SZ;
1011         case ICP_QAT_HW_CIPHER_ALGO_3DES:
1012                 return ICP_QAT_HW_3DES_BLK_SZ;
1013         case ICP_QAT_HW_CIPHER_ALGO_AES128:
1014         case ICP_QAT_HW_CIPHER_ALGO_AES192:
1015         case ICP_QAT_HW_CIPHER_ALGO_AES256:
1016                 return ICP_QAT_HW_AES_BLK_SZ;
1017         default:
1018                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1019                 return -EFAULT;
1020         };
1021         return -EFAULT;
1022 }
1023
1024 /*
1025  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1026  * This is digest size rounded up to nearest quadword
1027  */
1028 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1029 {
1030         switch (qat_hash_alg) {
1031         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1032                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1033                                                 QAT_HW_DEFAULT_ALIGNMENT);
1034         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1035                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1036                                                 QAT_HW_DEFAULT_ALIGNMENT);
1037         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1038                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1039                                                 QAT_HW_DEFAULT_ALIGNMENT);
1040         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1041                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1042                                                 QAT_HW_DEFAULT_ALIGNMENT);
1043         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1044                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1045                                                 QAT_HW_DEFAULT_ALIGNMENT);
1046         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1047                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1048                                                 QAT_HW_DEFAULT_ALIGNMENT);
1049         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1050         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1051                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1052                                                 QAT_HW_DEFAULT_ALIGNMENT);
1053         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1054                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1055                                                 QAT_HW_DEFAULT_ALIGNMENT);
1056         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1057                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1058                                                 QAT_HW_DEFAULT_ALIGNMENT);
1059         case ICP_QAT_HW_AUTH_ALGO_MD5:
1060                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1061                                                 QAT_HW_DEFAULT_ALIGNMENT);
1062         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1063                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1064                                                 QAT_HW_DEFAULT_ALIGNMENT);
1065         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1066                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1067                                                 QAT_HW_DEFAULT_ALIGNMENT);
1068         case ICP_QAT_HW_AUTH_ALGO_NULL:
1069                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1070                                                 QAT_HW_DEFAULT_ALIGNMENT);
1071         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1072                 /* return maximum state1 size in this case */
1073                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1074                                                 QAT_HW_DEFAULT_ALIGNMENT);
1075         default:
1076                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1077                 return -EFAULT;
1078         };
1079         return -EFAULT;
1080 }
1081
1082 /* returns digest size in bytes  per hash algo */
1083 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1084 {
1085         switch (qat_hash_alg) {
1086         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1087                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1088         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1089                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1090         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1091                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1092         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1093                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1094         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1095                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1096         case ICP_QAT_HW_AUTH_ALGO_MD5:
1097                 return ICP_QAT_HW_MD5_STATE1_SZ;
1098         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1099                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1100         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1101                 /* return maximum digest size in this case */
1102                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1103         default:
1104                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1105                 return -EFAULT;
1106         };
1107         return -EFAULT;
1108 }
1109
/*
 * Returns block size in bytes per hash algo (fixed typo: "byes"),
 * or -EFAULT for an unknown algorithm.
 */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	/* SHA-224 shares the SHA-256 block size. */
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	/* SHA-384 shares the SHA-512 block size. */
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_BLK_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	/* Removed unreachable trailing return and stray ';' after switch. */
}
1139
1140 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1141 {
1142         SHA_CTX ctx;
1143
1144         if (!SHA1_Init(&ctx))
1145                 return -EFAULT;
1146         SHA1_Transform(&ctx, data_in);
1147         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1148         return 0;
1149 }
1150
1151 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1152 {
1153         SHA256_CTX ctx;
1154
1155         if (!SHA224_Init(&ctx))
1156                 return -EFAULT;
1157         SHA256_Transform(&ctx, data_in);
1158         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1159         return 0;
1160 }
1161
1162 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1163 {
1164         SHA256_CTX ctx;
1165
1166         if (!SHA256_Init(&ctx))
1167                 return -EFAULT;
1168         SHA256_Transform(&ctx, data_in);
1169         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1170         return 0;
1171 }
1172
1173 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1174 {
1175         SHA512_CTX ctx;
1176
1177         if (!SHA384_Init(&ctx))
1178                 return -EFAULT;
1179         SHA512_Transform(&ctx, data_in);
1180         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1181         return 0;
1182 }
1183
1184 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1185 {
1186         SHA512_CTX ctx;
1187
1188         if (!SHA512_Init(&ctx))
1189                 return -EFAULT;
1190         SHA512_Transform(&ctx, data_in);
1191         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1192         return 0;
1193 }
1194
1195 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1196 {
1197         MD5_CTX ctx;
1198
1199         if (!MD5_Init(&ctx))
1200                 return -EFAULT;
1201         MD5_Transform(&ctx, data_in);
1202         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1203
1204         return 0;
1205 }
1206
/*
 * Compute the partial-hash state used by the QAT precompute:
 * run one compression round of hash_alg over data_in and write the
 * resulting state to data_out, byte-swapped to big-endian as the
 * hardware expects (32-bit words for SHA-1/224/256, 64-bit words for
 * SHA-384/512; MD5 state is copied as-is).
 *
 * Returns 0 on success, -EFAULT on failure or unknown algorithm.
 */
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	/* Scratch buffer sized for the largest possible digest (SHA-512). */
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	/* Initialize to avoid gcc warning */
	memset(digest, 0, sizeof(digest));

	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	/* Two aliases of the output: 32-bit or 64-bit word view. */
	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		/* Swap each 32-bit state word to big-endian. */
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		/* SHA-384/512 state words are 64-bit wide. */
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 state needs no byte-swap; write straight to output. */
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
1275 #define HMAC_IPAD_VALUE 0x36
1276 #define HMAC_OPAD_VALUE 0x5c
1277 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1278
1279 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1280
1281 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1282 {
1283         int i;
1284
1285         derived[0] = base[0] << 1;
1286         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1287                 derived[i] = base[i] << 1;
1288                 derived[i - 1] |= base[i] >> 7;
1289         }
1290
1291         if (base[0] & 0x80)
1292                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1293 }
1294
1295 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1296                                 const uint8_t *auth_key,
1297                                 uint16_t auth_keylen,
1298                                 uint8_t *p_state_buf,
1299                                 uint16_t *p_state_len,
1300                                 uint8_t aes_cmac)
1301 {
1302         int block_size;
1303         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1304         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1305         int i;
1306
1307         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1308
1309                 /* CMAC */
1310                 if (aes_cmac) {
1311                         AES_KEY enc_key;
1312                         uint8_t *in = NULL;
1313                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1314                         uint8_t *k1, *k2;
1315
1316                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1317
1318                         in = rte_zmalloc("AES CMAC K1",
1319                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1320
1321                         if (in == NULL) {
1322                                 QAT_LOG(ERR, "Failed to alloc memory");
1323                                 return -ENOMEM;
1324                         }
1325
1326                         rte_memcpy(in, AES_CMAC_SEED,
1327                                    ICP_QAT_HW_AES_128_KEY_SZ);
1328                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1329
1330                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1331                                 &enc_key) != 0) {
1332                                 rte_free(in);
1333                                 return -EFAULT;
1334                         }
1335
1336                         AES_encrypt(in, k0, &enc_key);
1337
1338                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1339                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1340
1341                         aes_cmac_key_derive(k0, k1);
1342                         aes_cmac_key_derive(k1, k2);
1343
1344                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1345                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1346                         rte_free(in);
1347                         return 0;
1348                 } else {
1349                         static uint8_t qat_aes_xcbc_key_seed[
1350                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1351                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1352                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1353                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1354                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1355                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1356                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1357                         };
1358
1359                         uint8_t *in = NULL;
1360                         uint8_t *out = p_state_buf;
1361                         int x;
1362                         AES_KEY enc_key;
1363
1364                         in = rte_zmalloc("working mem for key",
1365                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1366                         if (in == NULL) {
1367                                 QAT_LOG(ERR, "Failed to alloc memory");
1368                                 return -ENOMEM;
1369                         }
1370
1371                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1372                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1373                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1374                                 if (AES_set_encrypt_key(auth_key,
1375                                                         auth_keylen << 3,
1376                                                         &enc_key) != 0) {
1377                                         rte_free(in -
1378                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1379                                         memset(out -
1380                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1381                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1382                                         return -EFAULT;
1383                                 }
1384                                 AES_encrypt(in, out, &enc_key);
1385                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1386                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1387                         }
1388                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1389                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1390                         return 0;
1391                 }
1392
1393         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1394                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1395                 uint8_t *in = NULL;
1396                 uint8_t *out = p_state_buf;
1397                 AES_KEY enc_key;
1398
1399                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1400                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1401                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1402                 in = rte_zmalloc("working mem for key",
1403                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1404                 if (in == NULL) {
1405                         QAT_LOG(ERR, "Failed to alloc memory");
1406                         return -ENOMEM;
1407                 }
1408
1409                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1410                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1411                         &enc_key) != 0) {
1412                         return -EFAULT;
1413                 }
1414                 AES_encrypt(in, out, &enc_key);
1415                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1416                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1417                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1418                 rte_free(in);
1419                 return 0;
1420         }
1421
1422         block_size = qat_hash_get_block_size(hash_alg);
1423         if (block_size < 0)
1424                 return block_size;
1425         /* init ipad and opad from key and xor with fixed values */
1426         memset(ipad, 0, block_size);
1427         memset(opad, 0, block_size);
1428
1429         if (auth_keylen > (unsigned int)block_size) {
1430                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1431                 return -EFAULT;
1432         }
1433         rte_memcpy(ipad, auth_key, auth_keylen);
1434         rte_memcpy(opad, auth_key, auth_keylen);
1435
1436         for (i = 0; i < block_size; i++) {
1437                 uint8_t *ipad_ptr = ipad + i;
1438                 uint8_t *opad_ptr = opad + i;
1439                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1440                 *opad_ptr ^= HMAC_OPAD_VALUE;
1441         }
1442
1443         /* do partial hash of ipad and copy to state1 */
1444         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1445                 memset(ipad, 0, block_size);
1446                 memset(opad, 0, block_size);
1447                 QAT_LOG(ERR, "ipad precompute failed");
1448                 return -EFAULT;
1449         }
1450
1451         /*
1452          * State len is a multiple of 8, so may be larger than the digest.
1453          * Put the partial hash of opad state_len bytes after state1
1454          */
1455         *p_state_len = qat_hash_get_state1_size(hash_alg);
1456         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1457                 memset(ipad, 0, block_size);
1458                 memset(opad, 0, block_size);
1459                 QAT_LOG(ERR, "opad precompute failed");
1460                 return -EFAULT;
1461         }
1462
1463         /*  don't leave data lying around */
1464         memset(ipad, 0, block_size);
1465         memset(opad, 0, block_size);
1466         return 0;
1467 }
1468
1469 static void
1470 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
1471 {
1472         struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1473         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1474         enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
1475         uint32_t slice_flags = session->slice_types;
1476
1477         header->hdr_flags =
1478                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1479         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1480         header->service_cmd_id = session->qat_cmd;
1481         header->comn_req_flags =
1482                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1483                                         QAT_COMN_PTR_TYPE_FLAT);
1484         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1485                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1486         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1487                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1488
1489         switch (proto_flags)            {
1490         case QAT_CRYPTO_PROTO_FLAG_NONE:
1491                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1492                                         ICP_QAT_FW_LA_NO_PROTO);
1493                 break;
1494         case QAT_CRYPTO_PROTO_FLAG_CCM:
1495                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1496                                         ICP_QAT_FW_LA_CCM_PROTO);
1497                 break;
1498         case QAT_CRYPTO_PROTO_FLAG_GCM:
1499                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1500                                         ICP_QAT_FW_LA_GCM_PROTO);
1501                 break;
1502         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1503                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1504                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1505                 break;
1506         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1507                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1508                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1509                 break;
1510         }
1511
1512         /* More than one of the following flags can be set at once */
1513         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
1514                 ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
1515                         header->serv_specif_flags,
1516                         ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
1517         }
1518         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
1519                 ICP_QAT_FW_LA_SLICE_TYPE_SET(
1520                         header->serv_specif_flags,
1521                         ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
1522         }
1523
1524         if (session->is_auth) {
1525                 if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1526                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1527                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1528                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1529                                         ICP_QAT_FW_LA_CMP_AUTH_RES);
1530                 } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
1531                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1532                                                 ICP_QAT_FW_LA_RET_AUTH_RES);
1533                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1534                                                 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1535                 }
1536         } else {
1537                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1538                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1539                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1540                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1541         }
1542
1543         if (session->is_iv12B) {
1544                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1545                         header->serv_specif_flags,
1546                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1547         }
1548
1549         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1550                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1551         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1552                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1553 }
1554
/**
 * Populate the cipher part of a session's QAT content descriptor (CD).
 *
 * Chains firmware slices in the request template according to the session
 * command, selects whether the hardware must convert (expand) the key,
 * writes the cipher config word and key material at cdesc->cd_cur_ptr
 * (with algorithm-specific padding or key duplication), and records the
 * resulting sizes/offsets in the cipher cd ctrl header.
 *
 * @param cdesc session descriptor being filled; cd_cur_ptr is advanced
 *	past the data written here
 * @param cipherkey raw cipher key supplied by the application
 * @param cipherkeylen length of cipherkey in bytes
 * @return 0 on success, -EFAULT if the session command is not one that
 *	includes a cipher operation
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	/* cipher and auth cd ctrl headers are overlapping views of cd_ctrl */
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	/* two views of serv_specif_rqpars: UCS layout vs legacy layout */
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* Cipher-only: cipher slice then write result to DRAM;
		 * no digest is returned or compared. */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* Cipher then hash: chain cipher -> auth -> DRAM write. */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* HASH_CIPHER falls through: its chaining and cd_cur_ptr
		 * are set up by qat_sym_cd_auth_set(). */
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set.
		 * Chacha20-Poly1305 is special case, CTR but single-pass
		 * so both direction need to be used.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
				cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		}
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		/* Stream ciphers: hardware always converts the key. */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		/* Block-cipher decrypt needs the expanded decryption key. */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizes (state_sz is in 8-byte quadwords). */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		/* AES (and other block ciphers): key is used as supplied. */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	}
	/* Offset of this cipher setup within the CD, in quadwords. */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 stores the key twice: plain, then XORed with
		 * the F8 key modifier. */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		/* UCS slice (gen4): wider config block, key padded up to a
		 * multiple of the AES-128 key size. */
		const uint8_t *final_key = cipherkey;

		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (cdesc->is_single_pass) {
		/* Single-pass AEAD: digest length lives in the config word. */
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
					cdesc->aad_len);
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
	}

	if (total_key_size > cipherkeylen) {
		/* Pad the key area up to the size the hardware expects. */
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	if (cdesc->is_ucs) {
		/*
		 * These values match in terms of position auth
		 * slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
		}
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;
	}
	/* Record total CD size (quadwords) and key size (quadwords). */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1755
1756 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1757                                                 const uint8_t *authkey,
1758                                                 uint32_t authkeylen,
1759                                                 uint32_t aad_length,
1760                                                 uint32_t digestsize,
1761                                                 unsigned int operation)
1762 {
1763         struct icp_qat_hw_auth_setup *hash;
1764         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1765         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1766         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1767         void *ptr = &req_tmpl->cd_ctrl;
1768         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1769         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1770         struct icp_qat_fw_la_auth_req_params *auth_param =
1771                 (struct icp_qat_fw_la_auth_req_params *)
1772                 ((char *)&req_tmpl->serv_specif_rqpars +
1773                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1774         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1775         uint16_t hash_offset, cd_size;
1776         uint32_t *aad_len = NULL;
1777         uint32_t wordIndex  = 0;
1778         uint32_t *pTempKey;
1779
1780         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1781                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1782                                         ICP_QAT_FW_SLICE_AUTH);
1783                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1784                                         ICP_QAT_FW_SLICE_DRAM_WR);
1785                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1786         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1787                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1788                                 ICP_QAT_FW_SLICE_AUTH);
1789                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1790                                 ICP_QAT_FW_SLICE_CIPHER);
1791                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1792                                 ICP_QAT_FW_SLICE_CIPHER);
1793                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1794                                 ICP_QAT_FW_SLICE_DRAM_WR);
1795                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1796         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1797                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1798                 return -EFAULT;
1799         }
1800
1801         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1802                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1803         else
1804                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1805
1806         /*
1807          * Setup the inner hash config
1808          */
1809         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1810         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1811         hash->auth_config.reserved = 0;
1812         hash->auth_config.config =
1813                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1814                                 cdesc->qat_hash_alg, digestsize);
1815
1816         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1817                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1818                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1819                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1820                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1821                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1822                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1823                 || cdesc->is_cnt_zero
1824                         )
1825                 hash->auth_counter.counter = 0;
1826         else {
1827                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1828
1829                 if (block_size < 0)
1830                         return block_size;
1831                 hash->auth_counter.counter = rte_bswap32(block_size);
1832         }
1833
1834         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1835
1836         /*
1837          * cd_cur_ptr now points at the state1 information.
1838          */
1839         switch (cdesc->qat_hash_alg) {
1840         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1841                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1842                         /* Plain SHA-1 */
1843                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1844                                         sizeof(sha1InitialState));
1845                         state1_size = qat_hash_get_state1_size(
1846                                         cdesc->qat_hash_alg);
1847                         break;
1848                 }
1849                 /* SHA-1 HMAC */
1850                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1851                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1852                         cdesc->aes_cmac)) {
1853                         QAT_LOG(ERR, "(SHA)precompute failed");
1854                         return -EFAULT;
1855                 }
1856                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1857                 break;
1858         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1859                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1860                         /* Plain SHA-224 */
1861                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1862                                         sizeof(sha224InitialState));
1863                         state1_size = qat_hash_get_state1_size(
1864                                         cdesc->qat_hash_alg);
1865                         break;
1866                 }
1867                 /* SHA-224 HMAC */
1868                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1869                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1870                         cdesc->aes_cmac)) {
1871                         QAT_LOG(ERR, "(SHA)precompute failed");
1872                         return -EFAULT;
1873                 }
1874                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1875                 break;
1876         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1877                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1878                         /* Plain SHA-256 */
1879                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1880                                         sizeof(sha256InitialState));
1881                         state1_size = qat_hash_get_state1_size(
1882                                         cdesc->qat_hash_alg);
1883                         break;
1884                 }
1885                 /* SHA-256 HMAC */
1886                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1887                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1888                         cdesc->aes_cmac)) {
1889                         QAT_LOG(ERR, "(SHA)precompute failed");
1890                         return -EFAULT;
1891                 }
1892                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1893                 break;
1894         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1895                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1896                         /* Plain SHA-384 */
1897                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1898                                         sizeof(sha384InitialState));
1899                         state1_size = qat_hash_get_state1_size(
1900                                         cdesc->qat_hash_alg);
1901                         break;
1902                 }
1903                 /* SHA-384 HMAC */
1904                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1905                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1906                         cdesc->aes_cmac)) {
1907                         QAT_LOG(ERR, "(SHA)precompute failed");
1908                         return -EFAULT;
1909                 }
1910                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1911                 break;
1912         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1913                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1914                         /* Plain SHA-512 */
1915                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1916                                         sizeof(sha512InitialState));
1917                         state1_size = qat_hash_get_state1_size(
1918                                         cdesc->qat_hash_alg);
1919                         break;
1920                 }
1921                 /* SHA-512 HMAC */
1922                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1923                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1924                         cdesc->aes_cmac)) {
1925                         QAT_LOG(ERR, "(SHA)precompute failed");
1926                         return -EFAULT;
1927                 }
1928                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1929                 break;
1930         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1931                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1932
1933                 if (cdesc->aes_cmac)
1934                         memset(cdesc->cd_cur_ptr, 0, state1_size);
1935                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1936                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1937                         &state2_size, cdesc->aes_cmac)) {
1938                         cdesc->aes_cmac ? QAT_LOG(ERR,
1939                                                   "(CMAC)precompute failed")
1940                                         : QAT_LOG(ERR,
1941                                                   "(XCBC)precompute failed");
1942                         return -EFAULT;
1943                 }
1944                 break;
1945         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1946         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1947                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1948                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1949                 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1950                         authkeylen, cdesc->cd_cur_ptr + state1_size,
1951                         &state2_size, cdesc->aes_cmac)) {
1952                         QAT_LOG(ERR, "(GCM)precompute failed");
1953                         return -EFAULT;
1954                 }
1955                 /*
1956                  * Write (the length of AAD) into bytes 16-19 of state2
1957                  * in big-endian format. This field is 8 bytes
1958                  */
1959                 auth_param->u2.aad_sz =
1960                                 RTE_ALIGN_CEIL(aad_length, 16);
1961                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1962
1963                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1964                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1965                                         ICP_QAT_HW_GALOIS_H_SZ);
1966                 *aad_len = rte_bswap32(aad_length);
1967                 cdesc->aad_len = aad_length;
1968                 break;
1969         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1970                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1971                 state1_size = qat_hash_get_state1_size(
1972                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1973                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1974                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1975
1976                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1977                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
1978                 cipherconfig->cipher_config.val =
1979                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1980                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1981                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
1982                         ICP_QAT_HW_CIPHER_ENCRYPT);
1983                 memcpy(cipherconfig->key, authkey, authkeylen);
1984                 memset(cipherconfig->key + authkeylen,
1985                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1986                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1987                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1988                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1989                 break;
1990         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1991                 hash->auth_config.config =
1992                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1993                                 cdesc->qat_hash_alg, digestsize);
1994                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1995                 state1_size = qat_hash_get_state1_size(
1996                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1997                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1998                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1999                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
2000
2001                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2002                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
2003                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
2004                 cdesc->min_qat_dev_gen = QAT_GEN2;
2005
2006                 break;
2007         case ICP_QAT_HW_AUTH_ALGO_MD5:
2008                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
2009                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
2010                         cdesc->aes_cmac)) {
2011                         QAT_LOG(ERR, "(MD5)precompute failed");
2012                         return -EFAULT;
2013                 }
2014                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
2015                 break;
2016         case ICP_QAT_HW_AUTH_ALGO_NULL:
2017                 state1_size = qat_hash_get_state1_size(
2018                                 ICP_QAT_HW_AUTH_ALGO_NULL);
2019                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
2020                 break;
2021         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
2022                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
2023                 state1_size = qat_hash_get_state1_size(
2024                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
2025                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
2026                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
2027
2028                 if (aad_length > 0) {
2029                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2030                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
2031                         auth_param->u2.aad_sz =
2032                         RTE_ALIGN_CEIL(aad_length,
2033                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2034                 } else {
2035                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2036                 }
2037                 cdesc->aad_len = aad_length;
2038                 hash->auth_counter.counter = 0;
2039
2040                 hash_cd_ctrl->outer_prefix_sz = digestsize;
2041                 auth_param->hash_state_sz = digestsize;
2042
2043                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2044                 break;
2045         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2046                 state1_size = qat_hash_get_state1_size(
2047                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2048                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2049                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2050                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2051                                                         + authkeylen);
2052                 /*
2053                 * The Inner Hash Initial State2 block must contain IK
2054                 * (Initialisation Key), followed by IK XOR-ed with KM
2055                 * (Key Modifier): IK||(IK^KM).
2056                 */
2057                 /* write the auth key */
2058                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2059                 /* initialise temp key with auth key */
2060                 memcpy(pTempKey, authkey, authkeylen);
2061                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2062                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2063                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2064                 break;
2065         default:
2066                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2067                 return -EFAULT;
2068         }
2069
2070         /* Auth CD config setup */
2071         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2072         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2073         hash_cd_ctrl->inner_res_sz = digestsize;
2074         hash_cd_ctrl->final_sz = digestsize;
2075         hash_cd_ctrl->inner_state1_sz = state1_size;
2076         auth_param->auth_res_sz = digestsize;
2077
2078         hash_cd_ctrl->inner_state2_sz  = state2_size;
2079         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2080                         ((sizeof(struct icp_qat_hw_auth_setup) +
2081                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2082                                         >> 3);
2083
2084         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2085         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2086
2087         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2088         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2089
2090         return 0;
2091 }
2092
2093 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2094 {
2095         switch (key_len) {
2096         case ICP_QAT_HW_AES_128_KEY_SZ:
2097                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2098                 break;
2099         case ICP_QAT_HW_AES_192_KEY_SZ:
2100                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2101                 break;
2102         case ICP_QAT_HW_AES_256_KEY_SZ:
2103                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2104                 break;
2105         default:
2106                 return -EINVAL;
2107         }
2108         return 0;
2109 }
2110
2111 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2112                 enum icp_qat_hw_cipher_algo *alg)
2113 {
2114         switch (key_len) {
2115         case ICP_QAT_HW_AES_128_KEY_SZ:
2116                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2117                 break;
2118         case ICP_QAT_HW_AES_256_KEY_SZ:
2119                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2120                 break;
2121         default:
2122                 return -EINVAL;
2123         }
2124         return 0;
2125 }
2126
2127 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2128 {
2129         switch (key_len) {
2130         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2131                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2132                 break;
2133         default:
2134                 return -EINVAL;
2135         }
2136         return 0;
2137 }
2138
2139 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2140 {
2141         switch (key_len) {
2142         case ICP_QAT_HW_KASUMI_KEY_SZ:
2143                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2144                 break;
2145         default:
2146                 return -EINVAL;
2147         }
2148         return 0;
2149 }
2150
2151 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2152 {
2153         switch (key_len) {
2154         case ICP_QAT_HW_DES_KEY_SZ:
2155                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2156                 break;
2157         default:
2158                 return -EINVAL;
2159         }
2160         return 0;
2161 }
2162
2163 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2164 {
2165         switch (key_len) {
2166         case QAT_3DES_KEY_SZ_OPT1:
2167         case QAT_3DES_KEY_SZ_OPT2:
2168         case QAT_3DES_KEY_SZ_OPT3:
2169                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2170                 break;
2171         default:
2172                 return -EINVAL;
2173         }
2174         return 0;
2175 }
2176
2177 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2178 {
2179         switch (key_len) {
2180         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2181                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2182                 break;
2183         default:
2184                 return -EINVAL;
2185         }
2186         return 0;
2187 }
2188
2189 #ifdef RTE_LIB_SECURITY
2190 static int
2191 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2192 {
2193         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2194         struct rte_security_docsis_xform *docsis = &conf->docsis;
2195
2196         /* CRC generate -> Cipher encrypt */
2197         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2198
2199                 if (crypto_sym != NULL &&
2200                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2201                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2202                     crypto_sym->cipher.algo ==
2203                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2204                     (crypto_sym->cipher.key.length ==
2205                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2206                      crypto_sym->cipher.key.length ==
2207                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2208                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2209                     crypto_sym->next == NULL) {
2210                         return 0;
2211                 }
2212         /* Cipher decrypt -> CRC verify */
2213         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2214
2215                 if (crypto_sym != NULL &&
2216                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2217                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2218                     crypto_sym->cipher.algo ==
2219                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2220                     (crypto_sym->cipher.key.length ==
2221                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2222                      crypto_sym->cipher.key.length ==
2223                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2224                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2225                     crypto_sym->next == NULL) {
2226                         return 0;
2227                 }
2228         }
2229
2230         return -EINVAL;
2231 }
2232
2233 static int
2234 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2235                 struct rte_security_session_conf *conf, void *session_private)
2236 {
2237         int ret;
2238         int qat_cmd_id;
2239         struct rte_crypto_sym_xform *xform = NULL;
2240         struct qat_sym_session *session = session_private;
2241
2242         /* Clear the session */
2243         memset(session, 0, qat_sym_session_get_private_size(dev));
2244
2245         ret = qat_sec_session_check_docsis(conf);
2246         if (ret) {
2247                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2248                 return ret;
2249         }
2250
2251         xform = conf->crypto_xform;
2252
2253         /* Verify the session physical address is known */
2254         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2255         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2256                 QAT_LOG(ERR,
2257                         "Session physical address unknown. Bad memory pool.");
2258                 return -EINVAL;
2259         }
2260
2261         /* Set context descriptor physical address */
2262         session->cd_paddr = session_paddr +
2263                         offsetof(struct qat_sym_session, cd);
2264
2265         session->min_qat_dev_gen = QAT_GEN1;
2266
2267         /* Get requested QAT command id - should be cipher */
2268         qat_cmd_id = qat_get_cmd_id(xform);
2269         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2270                 QAT_LOG(ERR, "Unsupported xform chain requested");
2271                 return -ENOTSUP;
2272         }
2273         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2274
2275         ret = qat_sym_session_configure_cipher(dev, xform, session);
2276         if (ret < 0)
2277                 return ret;
2278         qat_sym_session_finalize(session);
2279
2280         return 0;
2281 }
2282
2283 int
2284 qat_security_session_create(void *dev,
2285                                 struct rte_security_session_conf *conf,
2286                                 struct rte_security_session *sess,
2287                                 struct rte_mempool *mempool)
2288 {
2289         void *sess_private_data;
2290         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2291         int ret;
2292
2293         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2294                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2295                 QAT_LOG(ERR, "Invalid security protocol");
2296                 return -EINVAL;
2297         }
2298
2299         if (rte_mempool_get(mempool, &sess_private_data)) {
2300                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2301                 return -ENOMEM;
2302         }
2303
2304         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2305                         sess_private_data);
2306         if (ret != 0) {
2307                 QAT_LOG(ERR, "Failed to configure session parameters");
2308                 /* Return session to mempool */
2309                 rte_mempool_put(mempool, sess_private_data);
2310                 return ret;
2311         }
2312
2313         set_sec_session_private_data(sess, sess_private_data);
2314
2315         return ret;
2316 }
2317
2318 int
2319 qat_security_session_destroy(void *dev __rte_unused,
2320                                  struct rte_security_session *sess)
2321 {
2322         void *sess_priv = get_sec_session_private_data(sess);
2323         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2324
2325         if (sess_priv) {
2326                 if (s->bpi_ctx)
2327                         bpi_cipher_ctx_free(s->bpi_ctx);
2328                 memset(s, 0, qat_sym_session_get_private_size(dev));
2329                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2330
2331                 set_sec_session_private_data(sess, NULL);
2332                 rte_mempool_put(sess_mp, sess_priv);
2333         }
2334         return 0;
2335 }
2336 #endif