crypto/qat: rework burst data path
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2022 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym.h"
24
/* Initial hash states used to seed the QAT precompute of HMAC inner/outer
 * digests. Values are the big-endian H0..Hn constants from FIPS 180-2.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
59
/* Forward declarations for content-descriptor setup helpers defined
 * later in this file.
 */
static int
qat_sym_cd_cipher_set(struct qat_sym_session *cd,
						const uint8_t *enckey,
						uint32_t enckeylen);

static int
qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation);
static void
qat_sym_session_init_common_hdr(struct qat_sym_session *session);
74
75 /* Req/cd init functions */
76
/* Final step of session setup: fill in the common request header
 * once all cipher/auth parameters have been configured.
 */
static void
qat_sym_session_finalize(struct qat_sym_session *session)
{
	qat_sym_session_init_common_hdr(session);
}
82
83 /** Frees a context previously created
84  *  Depends on openssl libcrypto
85  */
86 static void
87 bpi_cipher_ctx_free(void *bpi_ctx)
88 {
89         if (bpi_ctx != NULL)
90                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
91 }
92
93 /** Creates a context in either AES or DES in ECB mode
94  *  Depends on openssl libcrypto
95  */
96 static int
97 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
98                 enum rte_crypto_cipher_operation direction __rte_unused,
99                 const uint8_t *key, uint16_t key_length, void **ctx)
100 {
101         const EVP_CIPHER *algo = NULL;
102         int ret;
103         *ctx = EVP_CIPHER_CTX_new();
104
105         if (*ctx == NULL) {
106                 ret = -ENOMEM;
107                 goto ctx_init_err;
108         }
109
110         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
111                 algo = EVP_des_ecb();
112         else
113                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
114                         algo = EVP_aes_128_ecb();
115                 else
116                         algo = EVP_aes_256_ecb();
117
118         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
119         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
120                 ret = -EINVAL;
121                 goto ctx_init_err;
122         }
123
124         return 0;
125
126 ctx_init_err:
127         if (*ctx != NULL)
128                 EVP_CIPHER_CTX_free(*ctx);
129         return ret;
130 }
131
132 static int
133 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
134                 struct qat_cryptodev_private *internals)
135 {
136         int i = 0;
137         const struct rte_cryptodev_capabilities *capability;
138
139         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
140                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
141                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
142                         continue;
143
144                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
145                         continue;
146
147                 if (capability->sym.cipher.algo == algo)
148                         return 1;
149         }
150         return 0;
151 }
152
153 static int
154 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
155                 struct qat_cryptodev_private *internals)
156 {
157         int i = 0;
158         const struct rte_cryptodev_capabilities *capability;
159
160         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
161                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
162                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
163                         continue;
164
165                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
166                         continue;
167
168                 if (capability->sym.auth.algo == algo)
169                         return 1;
170         }
171         return 0;
172 }
173
/* Release the driver-private data attached to a symmetric session:
 * free the optional OpenSSL BPI context, wipe the private area and
 * return it to its mempool.
 */
void
qat_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;

	if (sess_priv) {
		/* BPI ctx exists only for DOCSIS BPI cipher sessions */
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_sym_session_get_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
192
193 static int
194 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
195 {
196         /* Cipher Only */
197         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
198                 return ICP_QAT_FW_LA_CMD_CIPHER;
199
200         /* Authentication Only */
201         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
202                 return ICP_QAT_FW_LA_CMD_AUTH;
203
204         /* AEAD */
205         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
206                 /* AES-GCM and AES-CCM works with different direction
207                  * GCM first encrypts and generate hash where AES-CCM
208                  * first generate hash and encrypts. Similar relation
209                  * applies to decryption.
210                  */
211                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
212                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
213                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
214                         else
215                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
216                 else
217                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
218                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
219                         else
220                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
221         }
222
223         if (xform->next == NULL)
224                 return -1;
225
226         /* Cipher then Authenticate */
227         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
228                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
229                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
230
231         /* Authenticate then Cipher */
232         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
233                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
235
236         return -1;
237 }
238
239 static struct rte_crypto_auth_xform *
240 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
241 {
242         do {
243                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
244                         return &xform->auth;
245
246                 xform = xform->next;
247         } while (xform);
248
249         return NULL;
250 }
251
252 static struct rte_crypto_cipher_xform *
253 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
254 {
255         do {
256                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
257                         return &xform->cipher;
258
259                 xform = xform->next;
260         } while (xform);
261
262         return NULL;
263 }
264
/* Configure the cipher part of a QAT session from the cipher xform in
 * the chain: validate the key, select the QAT algorithm/mode pair and
 * direction, then build the cipher content descriptor.
 *
 * Returns 0 on success, -EINVAL for bad key sizes / undefined
 * algorithms, -ENOTSUP for algorithms this device does not support.
 * On any error any BPI context created along the way is freed.
 */
int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
{
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	enum qat_device_gen qat_dev_gen =
				internals->qat_dev->qat_dev_gen;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		/* GEN4 devices use the UCS slice for AES-CTR */
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		/* DOCSIS BPI needs an OpenSSL ECB context for runt-block
		 * (partial final block) processing done on the CPU.
		 */
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		/* Same CPU-side runt-block handling as DES DOCSIS BPI */
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		/* ZUC is only present on some device generations, so check
		 * the runtime capability table first.
		 */
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		/* XTS keys are two concatenated AES keys; validate each half */
		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
			QAT_LOG(ERR, "AES-XTS-192 not supported");
			ret = -EINVAL;
			goto error_out;
		}
		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	/* Build the cipher content descriptor from the validated key */
	if (qat_sym_cd_cipher_set(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}
457
458 int
459 qat_sym_session_configure(struct rte_cryptodev *dev,
460                 struct rte_crypto_sym_xform *xform,
461                 struct rte_cryptodev_sym_session *sess,
462                 struct rte_mempool *mempool)
463 {
464         void *sess_private_data;
465         int ret;
466
467         if (rte_mempool_get(mempool, &sess_private_data)) {
468                 CDEV_LOG_ERR(
469                         "Couldn't get object from session mempool");
470                 return -ENOMEM;
471         }
472
473         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
474         if (ret != 0) {
475                 QAT_LOG(ERR,
476                     "Crypto QAT PMD: failed to configure session parameters");
477
478                 /* Return session to mempool */
479                 rte_mempool_put(mempool, sess_private_data);
480                 return ret;
481         }
482
483         set_sym_session_private_data(sess, dev->driver_id,
484                 sess_private_data);
485
486         return 0;
487 }
488
/* Populate the driver-private session (@session_private) from the
 * transform chain: resolve the firmware command id, run the matching
 * cipher/auth/AEAD configuration, then finalize the common header and
 * hand the session to the generation-specific set_session hook.
 *
 * Returns 0 on success, -EINVAL for a bad mempool/undefined transform,
 * -ENOTSUP for unsupported chains/services, or an error from the
 * per-transform configuration helpers.
 */
int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_sym_session *session = session_private;
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
	int ret;
	int qat_cmd_id;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR,
			"Session physical address unknown. Bad memory pool.");
		return -EINVAL;
	}

	memset(session, 0, sizeof(*session));
	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	session->dev_id = internals->dev_id;
	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
	session->is_ucs = 0;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		if (ret < 0)
			return ret;
		/* GEN3 can run GMAC with a 12-byte IV as single-pass */
		session->is_single_pass_gmac =
			       qat_dev_gen == QAT_GEN3 &&
			       xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
			       xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		/* Either an AEAD transform or a cipher+auth chain;
		 * cipher descriptor content must be created first.
		 */
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		/* Either an AEAD transform or an auth+cipher chain;
		 * auth descriptor content must be created first.
		 */
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	/* Remaining firmware services are not exposed by this PMD */
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		QAT_LOG(ERR, "Unsupported Service %u",
			session->qat_cmd);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Unsupported Service %u",
			session->qat_cmd);
		return -ENOTSUP;
	}
	qat_sym_session_finalize(session);

	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
			(void *)session);
}
594
595 static int
596 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
597                 const struct rte_crypto_aead_xform *aead_xform)
598 {
599         session->is_single_pass = 1;
600         session->is_auth = 1;
601         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
602         /* Chacha-Poly is special case that use QAT CTR mode */
603         if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
604                 session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
605         else
606                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
607
608         session->cipher_iv.offset = aead_xform->iv.offset;
609         session->cipher_iv.length = aead_xform->iv.length;
610         session->aad_len = aead_xform->aad_length;
611         session->digest_length = aead_xform->digest_length;
612
613         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
614                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
615                 session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
616         } else {
617                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
618                 session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
619         }
620
621         return 0;
622 }
623
624 int
625 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
626                                 struct rte_crypto_sym_xform *xform,
627                                 struct qat_sym_session *session)
628 {
629         struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
630         struct qat_cryptodev_private *internals = dev->data->dev_private;
631         const uint8_t *key_data = auth_xform->key.data;
632         uint8_t key_length = auth_xform->key.length;
633         enum qat_device_gen qat_dev_gen =
634                         internals->qat_dev->qat_dev_gen;
635
636         session->aes_cmac = 0;
637         session->auth_key_length = auth_xform->key.length;
638         session->auth_iv.offset = auth_xform->iv.offset;
639         session->auth_iv.length = auth_xform->iv.length;
640         session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
641         session->is_auth = 1;
642         session->digest_length = auth_xform->digest_length;
643
644         switch (auth_xform->algo) {
645         case RTE_CRYPTO_AUTH_SHA1:
646                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
647                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
648                 break;
649         case RTE_CRYPTO_AUTH_SHA224:
650                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
651                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
652                 break;
653         case RTE_CRYPTO_AUTH_SHA256:
654                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
655                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
656                 break;
657         case RTE_CRYPTO_AUTH_SHA384:
658                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
659                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
660                 break;
661         case RTE_CRYPTO_AUTH_SHA512:
662                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
663                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
664                 break;
665         case RTE_CRYPTO_AUTH_SHA1_HMAC:
666                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
667                 break;
668         case RTE_CRYPTO_AUTH_SHA224_HMAC:
669                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
670                 break;
671         case RTE_CRYPTO_AUTH_SHA256_HMAC:
672                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
673                 break;
674         case RTE_CRYPTO_AUTH_SHA384_HMAC:
675                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
676                 break;
677         case RTE_CRYPTO_AUTH_SHA512_HMAC:
678                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
679                 break;
680         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
681                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
682                 break;
683         case RTE_CRYPTO_AUTH_AES_CMAC:
684                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
685                 session->aes_cmac = 1;
686                 break;
687         case RTE_CRYPTO_AUTH_AES_GMAC:
688                 if (qat_sym_validate_aes_key(auth_xform->key.length,
689                                 &session->qat_cipher_alg) != 0) {
690                         QAT_LOG(ERR, "Invalid AES key size");
691                         return -EINVAL;
692                 }
693                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
694                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
695                 if (session->auth_iv.length == 0)
696                         session->auth_iv.length = AES_GCM_J0_LEN;
697                 else
698                         session->is_iv12B = 1;
699                 if (qat_dev_gen == QAT_GEN4) {
700                         session->is_cnt_zero = 1;
701                         session->is_ucs = 1;
702                 }
703                 break;
704         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
705                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
706                 break;
707         case RTE_CRYPTO_AUTH_MD5_HMAC:
708                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
709                 break;
710         case RTE_CRYPTO_AUTH_NULL:
711                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
712                 break;
713         case RTE_CRYPTO_AUTH_KASUMI_F9:
714                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
715                 break;
716         case RTE_CRYPTO_AUTH_ZUC_EIA3:
717                 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
718                         QAT_LOG(ERR, "%s not supported on this device",
719                                 rte_crypto_auth_algorithm_strings
720                                 [auth_xform->algo]);
721                         return -ENOTSUP;
722                 }
723                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
724                 break;
725         case RTE_CRYPTO_AUTH_MD5:
726         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
727                 QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
728                                 auth_xform->algo);
729                 return -ENOTSUP;
730         default:
731                 QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
732                                 auth_xform->algo);
733                 return -EINVAL;
734         }
735
736         if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
737                 session->is_gmac = 1;
738                 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
739                         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
740                         session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
741                         /*
742                          * It needs to create cipher desc content first,
743                          * then authentication
744                          */
745                         if (qat_sym_cd_cipher_set(session,
746                                                 auth_xform->key.data,
747                                                 auth_xform->key.length))
748                                 return -EINVAL;
749
750                         if (qat_sym_cd_auth_set(session,
751                                                 key_data,
752                                                 key_length,
753                                                 0,
754                                                 auth_xform->digest_length,
755                                                 auth_xform->op))
756                                 return -EINVAL;
757                 } else {
758                         session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
759                         session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
760                         /*
761                          * It needs to create authentication desc content first,
762                          * then cipher
763                          */
764
765                         if (qat_sym_cd_auth_set(session,
766                                         key_data,
767                                         key_length,
768                                         0,
769                                         auth_xform->digest_length,
770                                         auth_xform->op))
771                                 return -EINVAL;
772
773                         if (qat_sym_cd_cipher_set(session,
774                                                 auth_xform->key.data,
775                                                 auth_xform->key.length))
776                                 return -EINVAL;
777                 }
778         } else {
779                 if (qat_sym_cd_auth_set(session,
780                                 key_data,
781                                 key_length,
782                                 0,
783                                 auth_xform->digest_length,
784                                 auth_xform->op))
785                         return -EINVAL;
786         }
787
788         return 0;
789 }
790
791 int
792 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
793                                 struct rte_crypto_sym_xform *xform,
794                                 struct qat_sym_session *session)
795 {
796         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
797         enum rte_crypto_auth_operation crypto_operation;
798         struct qat_cryptodev_private *internals =
799                         dev->data->dev_private;
800         enum qat_device_gen qat_dev_gen =
801                         internals->qat_dev->qat_dev_gen;
802
803         /*
804          * Store AEAD IV parameters as cipher IV,
805          * to avoid unnecessary memory usage
806          */
807         session->cipher_iv.offset = xform->aead.iv.offset;
808         session->cipher_iv.length = xform->aead.iv.length;
809
810         session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
811         session->is_auth = 1;
812         session->digest_length = aead_xform->digest_length;
813
814         session->is_single_pass = 0;
815         switch (aead_xform->algo) {
816         case RTE_CRYPTO_AEAD_AES_GCM:
817                 if (qat_sym_validate_aes_key(aead_xform->key.length,
818                                 &session->qat_cipher_alg) != 0) {
819                         QAT_LOG(ERR, "Invalid AES key size");
820                         return -EINVAL;
821                 }
822                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
823                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
824
825                 if (qat_dev_gen == QAT_GEN4)
826                         session->is_ucs = 1;
827                 if (session->cipher_iv.length == 0) {
828                         session->cipher_iv.length = AES_GCM_J0_LEN;
829                         break;
830                 }
831                 session->is_iv12B = 1;
832                 if (qat_dev_gen < QAT_GEN3)
833                         break;
834                 qat_sym_session_handle_single_pass(session,
835                                 aead_xform);
836                 break;
837         case RTE_CRYPTO_AEAD_AES_CCM:
838                 if (qat_sym_validate_aes_key(aead_xform->key.length,
839                                 &session->qat_cipher_alg) != 0) {
840                         QAT_LOG(ERR, "Invalid AES key size");
841                         return -EINVAL;
842                 }
843                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
844                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
845                 if (qat_dev_gen == QAT_GEN4)
846                         session->is_ucs = 1;
847                 break;
848         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
849                 if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
850                         return -EINVAL;
851                 if (qat_dev_gen == QAT_GEN4)
852                         session->is_ucs = 1;
853                 session->qat_cipher_alg =
854                                 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
855                 qat_sym_session_handle_single_pass(session,
856                                                 aead_xform);
857                 break;
858         default:
859                 QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
860                                 aead_xform->algo);
861                 return -EINVAL;
862         }
863
864         if (session->is_single_pass) {
865                 if (qat_sym_cd_cipher_set(session,
866                                 aead_xform->key.data, aead_xform->key.length))
867                         return -EINVAL;
868         } else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
869                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
870                         (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
871                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
872                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
873                 /*
874                  * It needs to create cipher desc content first,
875                  * then authentication
876                  */
877                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
878                         RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
879
880                 if (qat_sym_cd_cipher_set(session,
881                                         aead_xform->key.data,
882                                         aead_xform->key.length))
883                         return -EINVAL;
884
885                 if (qat_sym_cd_auth_set(session,
886                                         aead_xform->key.data,
887                                         aead_xform->key.length,
888                                         aead_xform->aad_length,
889                                         aead_xform->digest_length,
890                                         crypto_operation))
891                         return -EINVAL;
892         } else {
893                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
894                 /*
895                  * It needs to create authentication desc content first,
896                  * then cipher
897                  */
898
899                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
900                         RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
901
902                 if (qat_sym_cd_auth_set(session,
903                                         aead_xform->key.data,
904                                         aead_xform->key.length,
905                                         aead_xform->aad_length,
906                                         aead_xform->digest_length,
907                                         crypto_operation))
908                         return -EINVAL;
909
910                 if (qat_sym_cd_cipher_set(session,
911                                         aead_xform->key.data,
912                                         aead_xform->key.length))
913                         return -EINVAL;
914         }
915
916         return 0;
917 }
918
919 unsigned int qat_sym_session_get_private_size(
920                 struct rte_cryptodev *dev __rte_unused)
921 {
922         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
923 }
924
925 /* returns block size in bytes per cipher algo */
926 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
927 {
928         switch (qat_cipher_alg) {
929         case ICP_QAT_HW_CIPHER_ALGO_DES:
930                 return ICP_QAT_HW_DES_BLK_SZ;
931         case ICP_QAT_HW_CIPHER_ALGO_3DES:
932                 return ICP_QAT_HW_3DES_BLK_SZ;
933         case ICP_QAT_HW_CIPHER_ALGO_AES128:
934         case ICP_QAT_HW_CIPHER_ALGO_AES192:
935         case ICP_QAT_HW_CIPHER_ALGO_AES256:
936                 return ICP_QAT_HW_AES_BLK_SZ;
937         default:
938                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
939                 return -EFAULT;
940         };
941         return -EFAULT;
942 }
943
944 /*
945  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
946  * This is digest size rounded up to nearest quadword
947  */
948 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
949 {
950         switch (qat_hash_alg) {
951         case ICP_QAT_HW_AUTH_ALGO_SHA1:
952                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
953                                                 QAT_HW_DEFAULT_ALIGNMENT);
954         case ICP_QAT_HW_AUTH_ALGO_SHA224:
955                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
956                                                 QAT_HW_DEFAULT_ALIGNMENT);
957         case ICP_QAT_HW_AUTH_ALGO_SHA256:
958                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
959                                                 QAT_HW_DEFAULT_ALIGNMENT);
960         case ICP_QAT_HW_AUTH_ALGO_SHA384:
961                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
962                                                 QAT_HW_DEFAULT_ALIGNMENT);
963         case ICP_QAT_HW_AUTH_ALGO_SHA512:
964                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
965                                                 QAT_HW_DEFAULT_ALIGNMENT);
966         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
967                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
968                                                 QAT_HW_DEFAULT_ALIGNMENT);
969         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
970         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
971                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
972                                                 QAT_HW_DEFAULT_ALIGNMENT);
973         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
974                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
975                                                 QAT_HW_DEFAULT_ALIGNMENT);
976         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
977                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
978                                                 QAT_HW_DEFAULT_ALIGNMENT);
979         case ICP_QAT_HW_AUTH_ALGO_MD5:
980                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
981                                                 QAT_HW_DEFAULT_ALIGNMENT);
982         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
983                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
984                                                 QAT_HW_DEFAULT_ALIGNMENT);
985         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
986                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
987                                                 QAT_HW_DEFAULT_ALIGNMENT);
988         case ICP_QAT_HW_AUTH_ALGO_NULL:
989                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
990                                                 QAT_HW_DEFAULT_ALIGNMENT);
991         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
992                 /* return maximum state1 size in this case */
993                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
994                                                 QAT_HW_DEFAULT_ALIGNMENT);
995         default:
996                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
997                 return -EFAULT;
998         };
999         return -EFAULT;
1000 }
1001
1002 /* returns digest size in bytes  per hash algo */
1003 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1004 {
1005         switch (qat_hash_alg) {
1006         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1007                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1008         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1009                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1010         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1011                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1012         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1013                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1014         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1015                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1016         case ICP_QAT_HW_AUTH_ALGO_MD5:
1017                 return ICP_QAT_HW_MD5_STATE1_SZ;
1018         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1019                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1020         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1021                 /* return maximum digest size in this case */
1022                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1023         default:
1024                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1025                 return -EFAULT;
1026         };
1027         return -EFAULT;
1028 }
1029
/* returns block size in bytes per hash algo */
1031 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1032 {
1033         switch (qat_hash_alg) {
1034         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1035                 return SHA_CBLOCK;
1036         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1037                 return SHA256_CBLOCK;
1038         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1039                 return SHA256_CBLOCK;
1040         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1041                 return SHA512_CBLOCK;
1042         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1043                 return SHA512_CBLOCK;
1044         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1045                 return 16;
1046         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1047                 return ICP_QAT_HW_AES_BLK_SZ;
1048         case ICP_QAT_HW_AUTH_ALGO_MD5:
1049                 return MD5_CBLOCK;
1050         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1051                 /* return maximum block size in this case */
1052                 return SHA512_CBLOCK;
1053         default:
1054                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1055                 return -EFAULT;
1056         };
1057         return -EFAULT;
1058 }
1059
1060 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1061 {
1062         SHA_CTX ctx;
1063
1064         if (!SHA1_Init(&ctx))
1065                 return -EFAULT;
1066         SHA1_Transform(&ctx, data_in);
1067         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1068         return 0;
1069 }
1070
1071 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1072 {
1073         SHA256_CTX ctx;
1074
1075         if (!SHA224_Init(&ctx))
1076                 return -EFAULT;
1077         SHA256_Transform(&ctx, data_in);
1078         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1079         return 0;
1080 }
1081
1082 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1083 {
1084         SHA256_CTX ctx;
1085
1086         if (!SHA256_Init(&ctx))
1087                 return -EFAULT;
1088         SHA256_Transform(&ctx, data_in);
1089         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1090         return 0;
1091 }
1092
1093 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1094 {
1095         SHA512_CTX ctx;
1096
1097         if (!SHA384_Init(&ctx))
1098                 return -EFAULT;
1099         SHA512_Transform(&ctx, data_in);
1100         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1101         return 0;
1102 }
1103
1104 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1105 {
1106         SHA512_CTX ctx;
1107
1108         if (!SHA512_Init(&ctx))
1109                 return -EFAULT;
1110         SHA512_Transform(&ctx, data_in);
1111         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1112         return 0;
1113 }
1114
1115 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1116 {
1117         MD5_CTX ctx;
1118
1119         if (!MD5_Init(&ctx))
1120                 return -EFAULT;
1121         MD5_Transform(&ctx, data_in);
1122         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1123
1124         return 0;
1125 }
1126
1127 static int
1128 partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1129                 uint8_t *data_in, uint8_t *data_out)
1130 {
1131         int digest_size;
1132         uint8_t digest[qat_hash_get_digest_size(
1133                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1134         uint32_t *hash_state_out_be32;
1135         uint64_t *hash_state_out_be64;
1136         int i;
1137
1138         /* Initialize to avoid gcc warning */
1139         memset(digest, 0, sizeof(digest));
1140
1141         digest_size = qat_hash_get_digest_size(hash_alg);
1142         if (digest_size <= 0)
1143                 return -EFAULT;
1144
1145         hash_state_out_be32 = (uint32_t *)data_out;
1146         hash_state_out_be64 = (uint64_t *)data_out;
1147
1148         switch (hash_alg) {
1149         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1150                 if (partial_hash_sha1(data_in, digest))
1151                         return -EFAULT;
1152                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1153                         *hash_state_out_be32 =
1154                                 rte_bswap32(*(((uint32_t *)digest)+i));
1155                 break;
1156         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1157                 if (partial_hash_sha224(data_in, digest))
1158                         return -EFAULT;
1159                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1160                         *hash_state_out_be32 =
1161                                 rte_bswap32(*(((uint32_t *)digest)+i));
1162                 break;
1163         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1164                 if (partial_hash_sha256(data_in, digest))
1165                         return -EFAULT;
1166                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1167                         *hash_state_out_be32 =
1168                                 rte_bswap32(*(((uint32_t *)digest)+i));
1169                 break;
1170         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1171                 if (partial_hash_sha384(data_in, digest))
1172                         return -EFAULT;
1173                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1174                         *hash_state_out_be64 =
1175                                 rte_bswap64(*(((uint64_t *)digest)+i));
1176                 break;
1177         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1178                 if (partial_hash_sha512(data_in, digest))
1179                         return -EFAULT;
1180                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1181                         *hash_state_out_be64 =
1182                                 rte_bswap64(*(((uint64_t *)digest)+i));
1183                 break;
1184         case ICP_QAT_HW_AUTH_ALGO_MD5:
1185                 if (partial_hash_md5(data_in, data_out))
1186                         return -EFAULT;
1187                 break;
1188         default:
1189                 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1190                 return -EFAULT;
1191         }
1192
1193         return 0;
1194 }
1195 #define HMAC_IPAD_VALUE 0x36
1196 #define HMAC_OPAD_VALUE 0x5c
1197 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1198
1199 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1200
/*
 * CMAC subkey derivation step (NIST SP 800-38B): treat the 16-byte block
 * @base as a 128-bit big-endian value, left-shift it by one bit into
 * @derived and, if the original MSB was set, XOR the constant Rb into the
 * last byte.
 */
static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
{
	int i;

	/* shift each byte left; carry the top bit into the previous byte */
	derived[0] = base[0] << 1;
	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
		derived[i] = base[i] << 1;
		derived[i - 1] |= base[i] >> 7;
	}

	/* MSB of the original value was set: fold in Rb */
	if (base[0] & 0x80)
		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
}
1214
1215 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1216                                 const uint8_t *auth_key,
1217                                 uint16_t auth_keylen,
1218                                 uint8_t *p_state_buf,
1219                                 uint16_t *p_state_len,
1220                                 uint8_t aes_cmac)
1221 {
1222         int block_size;
1223         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1224         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1225         int i;
1226
1227         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1228
1229                 /* CMAC */
1230                 if (aes_cmac) {
1231                         AES_KEY enc_key;
1232                         uint8_t *in = NULL;
1233                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1234                         uint8_t *k1, *k2;
1235
1236                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1237
1238                         in = rte_zmalloc("AES CMAC K1",
1239                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1240
1241                         if (in == NULL) {
1242                                 QAT_LOG(ERR, "Failed to alloc memory");
1243                                 return -ENOMEM;
1244                         }
1245
1246                         rte_memcpy(in, AES_CMAC_SEED,
1247                                    ICP_QAT_HW_AES_128_KEY_SZ);
1248                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1249
1250                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1251                                 &enc_key) != 0) {
1252                                 rte_free(in);
1253                                 return -EFAULT;
1254                         }
1255
1256                         AES_encrypt(in, k0, &enc_key);
1257
1258                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1259                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1260
1261                         aes_cmac_key_derive(k0, k1);
1262                         aes_cmac_key_derive(k1, k2);
1263
1264                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1265                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1266                         rte_free(in);
1267                         return 0;
1268                 } else {
1269                         static uint8_t qat_aes_xcbc_key_seed[
1270                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1271                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1272                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1273                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1274                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1275                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1276                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1277                         };
1278
1279                         uint8_t *in = NULL;
1280                         uint8_t *out = p_state_buf;
1281                         int x;
1282                         AES_KEY enc_key;
1283
1284                         in = rte_zmalloc("working mem for key",
1285                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1286                         if (in == NULL) {
1287                                 QAT_LOG(ERR, "Failed to alloc memory");
1288                                 return -ENOMEM;
1289                         }
1290
1291                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1292                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1293                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1294                                 if (AES_set_encrypt_key(auth_key,
1295                                                         auth_keylen << 3,
1296                                                         &enc_key) != 0) {
1297                                         rte_free(in -
1298                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1299                                         memset(out -
1300                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1301                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1302                                         return -EFAULT;
1303                                 }
1304                                 AES_encrypt(in, out, &enc_key);
1305                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1306                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1307                         }
1308                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1309                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1310                         return 0;
1311                 }
1312
1313         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1314                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1315                 uint8_t *in = NULL;
1316                 uint8_t *out = p_state_buf;
1317                 AES_KEY enc_key;
1318
1319                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1320                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1321                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1322                 in = rte_zmalloc("working mem for key",
1323                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1324                 if (in == NULL) {
1325                         QAT_LOG(ERR, "Failed to alloc memory");
1326                         return -ENOMEM;
1327                 }
1328
1329                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1330                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1331                         &enc_key) != 0) {
1332                         return -EFAULT;
1333                 }
1334                 AES_encrypt(in, out, &enc_key);
1335                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1336                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1337                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1338                 rte_free(in);
1339                 return 0;
1340         }
1341
1342         block_size = qat_hash_get_block_size(hash_alg);
1343         if (block_size < 0)
1344                 return block_size;
1345         /* init ipad and opad from key and xor with fixed values */
1346         memset(ipad, 0, block_size);
1347         memset(opad, 0, block_size);
1348
1349         if (auth_keylen > (unsigned int)block_size) {
1350                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1351                 return -EFAULT;
1352         }
1353         rte_memcpy(ipad, auth_key, auth_keylen);
1354         rte_memcpy(opad, auth_key, auth_keylen);
1355
1356         for (i = 0; i < block_size; i++) {
1357                 uint8_t *ipad_ptr = ipad + i;
1358                 uint8_t *opad_ptr = opad + i;
1359                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1360                 *opad_ptr ^= HMAC_OPAD_VALUE;
1361         }
1362
1363         /* do partial hash of ipad and copy to state1 */
1364         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1365                 memset(ipad, 0, block_size);
1366                 memset(opad, 0, block_size);
1367                 QAT_LOG(ERR, "ipad precompute failed");
1368                 return -EFAULT;
1369         }
1370
1371         /*
1372          * State len is a multiple of 8, so may be larger than the digest.
1373          * Put the partial hash of opad state_len bytes after state1
1374          */
1375         *p_state_len = qat_hash_get_state1_size(hash_alg);
1376         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1377                 memset(ipad, 0, block_size);
1378                 memset(opad, 0, block_size);
1379                 QAT_LOG(ERR, "opad precompute failed");
1380                 return -EFAULT;
1381         }
1382
1383         /*  don't leave data lying around */
1384         memset(ipad, 0, block_size);
1385         memset(opad, 0, block_size);
1386         return 0;
1387 }
1388
1389 static void
1390 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
1391 {
1392         struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1393         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1394         enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
1395         uint32_t slice_flags = session->slice_types;
1396
1397         header->hdr_flags =
1398                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1399         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1400         header->service_cmd_id = session->qat_cmd;
1401         header->comn_req_flags =
1402                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1403                                         QAT_COMN_PTR_TYPE_FLAT);
1404         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1405                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1406         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1407                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1408
1409         switch (proto_flags)            {
1410         case QAT_CRYPTO_PROTO_FLAG_NONE:
1411                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1412                                         ICP_QAT_FW_LA_NO_PROTO);
1413                 break;
1414         case QAT_CRYPTO_PROTO_FLAG_CCM:
1415                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1416                                         ICP_QAT_FW_LA_CCM_PROTO);
1417                 break;
1418         case QAT_CRYPTO_PROTO_FLAG_GCM:
1419                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1420                                         ICP_QAT_FW_LA_GCM_PROTO);
1421                 break;
1422         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1423                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1424                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1425                 break;
1426         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1427                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1428                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1429                 break;
1430         }
1431
1432         /* More than one of the following flags can be set at once */
1433         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
1434                 ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
1435                         header->serv_specif_flags,
1436                         ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
1437         }
1438         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
1439                 ICP_QAT_FW_LA_SLICE_TYPE_SET(
1440                         header->serv_specif_flags,
1441                         ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
1442         }
1443
1444         if (session->is_auth) {
1445                 if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1446                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1447                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1448                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1449                                         ICP_QAT_FW_LA_CMP_AUTH_RES);
1450                 } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
1451                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1452                                                 ICP_QAT_FW_LA_RET_AUTH_RES);
1453                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1454                                                 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1455                 }
1456         } else {
1457                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1458                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1459                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1460                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1461         }
1462
1463         if (session->is_iv12B) {
1464                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1465                         header->serv_specif_flags,
1466                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1467         }
1468
1469         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1470                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1471         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1472                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1473 }
1474
/**
 * Build the cipher part of the session's content descriptor (CD) and the
 * cipher control header of the firmware request template.
 *
 * Writes the cipher config word and the (possibly transformed/padded) key
 * at cdesc->cd_cur_ptr and advances that pointer past what was written.
 *
 * @param cdesc         session being built
 * @param cipherkey     cipher key supplied by the application
 * @param cipherkeylen  length of @cipherkey in bytes
 * @return 0 on success, -EFAULT if the session command is not a cipher
 *         command
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cd_ctrl is a union-like area: aliased as cipher or auth ctrl hdr */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	/* req_ucs and req_cipher alias the same request-parameter bytes;
	 * which layout applies depends on cdesc->is_ucs below. */
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	/*
	 * Chain the firmware slices according to the command and reset the
	 * CD write pointer. For HASH_CIPHER the slice chaining and
	 * cd_cur_ptr are not touched here - presumably already set up by
	 * qat_sym_cd_auth_set() which runs first in that case (TODO:
	 * confirm against callers).
	 */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		/* cipher-only: no digest to return or compare */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set.
		 * Chacha20-Poly1305 is special case, CTR but single-pass
		 * so both direction need to be used.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
				cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		}
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		/* SNOW3G/ZUC always need the key converted by HW */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		/* e.g. AES-CBC decrypt needs the decryption key schedule */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/*
	 * Per-algorithm key/state sizing. For SNOW3G and ZUC the IV is
	 * stored in the CD after the key, so it is counted in
	 * total_key_size; cipher_state_sz is in 8-byte words (>> 3).
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
	} else {
		/* default: AES-family, key copied verbatim */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	}
	/* offset of the cipher config inside the CD, in 8-byte words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	/* cipher and cipher20 alias cd_cur_ptr; layout chosen by is_ucs */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 needs the key followed by key ^ modifier */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		/* two keys were written: key and modified key */
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		const uint8_t *final_key = cipherkey;

		/* UCS slice: 20-style config block, key padded to a
		 * multiple of the AES-128 key size via total_key_size */
		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (cdesc->is_single_pass) {
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
					cdesc->aad_len);
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
	}

	/* Pad the key area up to total_key_size; 3DES short-key options
	 * replicate K1 into the missing key parts instead of zero-fill. */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	if (cdesc->is_ucs) {
		/*
		 * These values match in terms of position auth
		 * slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
		}
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;
	}
	/* CD size and key size are expressed in 8-byte words */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1674
/**
 * Build the auth part of the session's content descriptor (CD) and the
 * hash control header of the firmware request template.
 *
 * Writes the hash setup block followed by the algorithm-specific state1
 * and state2 regions (precomputed HMAC ipad/opad, GCM H value, keys for
 * SNOW3G/ZUC/KASUMI/CCM, ...) at cdesc->cd_cur_ptr and advances it.
 *
 * @param cdesc       session being built
 * @param authkey     authentication key
 * @param authkeylen  length of @authkey in bytes
 * @param aad_length  AAD length, used by GCM/CCM state setup
 * @param digestsize  digest length in bytes
 * @param operation   RTE_CRYPTO_AUTH_OP_VERIFY or _GENERATE
 * @return 0 on success, -EFAULT on invalid command/algorithm or
 *         precompute failure, negative error from block-size lookup
 */
int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	/* cd_ctrl area aliased as both cipher and auth control headers */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex  = 0;
	uint32_t *pTempKey;

	/*
	 * Chain the firmware slices according to the command and reset the
	 * CD write pointer. For CIPHER_HASH the chaining was set up by
	 * qat_sym_cd_cipher_set() and cd_cur_ptr already points past the
	 * cipher block, so nothing is done here.
	 */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		QAT_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	else
		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
				cdesc->qat_hash_alg, digestsize);

	/* The listed algorithms (and MODE0/counter-zero sessions) do not
	 * use the byte counter; all others get the hash block size,
	 * stored big-endian. */
	if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
		|| cdesc->is_cnt_zero
			)
		hash->auth_counter.counter = 0;
	else {
		int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);

		if (block_size < 0)
			return block_size;
		hash->auth_counter.counter = rte_bswap32(block_size);
	}

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-1 */
			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
					sizeof(sha1InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-1 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-224 */
			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
					sizeof(sha224InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-224 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-256 */
			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
					sizeof(sha256InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-256 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
			authkeylen, cdesc->cd_cur_ptr,  &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-384 */
			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
					sizeof(sha384InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-384 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-512 */
			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
					sizeof(sha512InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-512 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
			authkeylen, cdesc->cd_cur_ptr,  &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		/* same algo id covers both XCBC-MAC and CMAC; the
		 * aes_cmac flag selects the variant */
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

		if (cdesc->aes_cmac)
			memset(cdesc->cd_cur_ptr, 0, state1_size);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size, cdesc->aes_cmac)) {
			cdesc->aes_cmac ? QAT_LOG(ERR,
						  "(CMAC)precompute failed")
					: QAT_LOG(ERR,
						  "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		/* precompute writes the GHASH H value into state2 */
		if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
			authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size, cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(aad_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(aad_length);
		cdesc->aad_len = aad_length;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		/* an extra cipher config block carrying the key (plus a
		 * zeroed IV area) is appended after state2 */
		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		/* ZUC forces MODE0 regardless of the session auth mode */
		hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
				cdesc->qat_hash_alg, digestsize);
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;

		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_NULL);
		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		/* AES-CBC-MAC as used by CCM */
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;

		/* CCM AAD is prefixed by the B0 block and length info */
		if (aad_length > 0) {
			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
			ICP_QAT_HW_CCM_AAD_LEN_INFO;
			auth_param->u2.aad_sz =
			RTE_ALIGN_CEIL(aad_length,
			ICP_QAT_HW_CCM_AAD_ALIGNMENT);
		} else {
			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
		}
		cdesc->aad_len = aad_length;
		hash->auth_counter.counter = 0;

		hash_cd_ctrl->outer_prefix_sz = digestsize;
		auth_param->hash_state_sz = digestsize;

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
							+ authkeylen);
		/*
		* The Inner Hash Initial State2 block must contain IK
		* (Initialisation Key), followed by IK XOR-ed with KM
		* (Key Modifier): IK||(IK^KM).
		*/
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	/* state2 follows state1 (8-byte aligned); offsets in 8-byte words */
	hash_cd_ctrl->inner_state2_sz  = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
2010
2011 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2012 {
2013         switch (key_len) {
2014         case ICP_QAT_HW_AES_128_KEY_SZ:
2015                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2016                 break;
2017         case ICP_QAT_HW_AES_192_KEY_SZ:
2018                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2019                 break;
2020         case ICP_QAT_HW_AES_256_KEY_SZ:
2021                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2022                 break;
2023         default:
2024                 return -EINVAL;
2025         }
2026         return 0;
2027 }
2028
2029 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2030                 enum icp_qat_hw_cipher_algo *alg)
2031 {
2032         switch (key_len) {
2033         case ICP_QAT_HW_AES_128_KEY_SZ:
2034                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2035                 break;
2036         case ICP_QAT_HW_AES_256_KEY_SZ:
2037                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2038                 break;
2039         default:
2040                 return -EINVAL;
2041         }
2042         return 0;
2043 }
2044
2045 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2046 {
2047         switch (key_len) {
2048         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2049                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2050                 break;
2051         default:
2052                 return -EINVAL;
2053         }
2054         return 0;
2055 }
2056
2057 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2058 {
2059         switch (key_len) {
2060         case ICP_QAT_HW_KASUMI_KEY_SZ:
2061                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2062                 break;
2063         default:
2064                 return -EINVAL;
2065         }
2066         return 0;
2067 }
2068
2069 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2070 {
2071         switch (key_len) {
2072         case ICP_QAT_HW_DES_KEY_SZ:
2073                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2074                 break;
2075         default:
2076                 return -EINVAL;
2077         }
2078         return 0;
2079 }
2080
2081 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2082 {
2083         switch (key_len) {
2084         case QAT_3DES_KEY_SZ_OPT1:
2085         case QAT_3DES_KEY_SZ_OPT2:
2086         case QAT_3DES_KEY_SZ_OPT3:
2087                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2088                 break;
2089         default:
2090                 return -EINVAL;
2091         }
2092         return 0;
2093 }
2094
2095 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2096 {
2097         switch (key_len) {
2098         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2099                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2100                 break;
2101         default:
2102                 return -EINVAL;
2103         }
2104         return 0;
2105 }
2106
2107 #ifdef RTE_LIB_SECURITY
2108 static int
2109 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2110 {
2111         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2112         struct rte_security_docsis_xform *docsis = &conf->docsis;
2113
2114         /* CRC generate -> Cipher encrypt */
2115         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2116
2117                 if (crypto_sym != NULL &&
2118                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2119                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2120                     crypto_sym->cipher.algo ==
2121                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2122                     (crypto_sym->cipher.key.length ==
2123                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2124                      crypto_sym->cipher.key.length ==
2125                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2126                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2127                     crypto_sym->next == NULL) {
2128                         return 0;
2129                 }
2130         /* Cipher decrypt -> CRC verify */
2131         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2132
2133                 if (crypto_sym != NULL &&
2134                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2135                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2136                     crypto_sym->cipher.algo ==
2137                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2138                     (crypto_sym->cipher.key.length ==
2139                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2140                      crypto_sym->cipher.key.length ==
2141                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2142                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2143                     crypto_sym->next == NULL) {
2144                         return 0;
2145                 }
2146         }
2147
2148         return -EINVAL;
2149 }
2150
2151 static int
2152 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2153                 struct rte_security_session_conf *conf, void *session_private)
2154 {
2155         int ret;
2156         int qat_cmd_id;
2157         struct rte_crypto_sym_xform *xform = NULL;
2158         struct qat_sym_session *session = session_private;
2159
2160         /* Clear the session */
2161         memset(session, 0, qat_sym_session_get_private_size(dev));
2162
2163         ret = qat_sec_session_check_docsis(conf);
2164         if (ret) {
2165                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2166                 return ret;
2167         }
2168
2169         xform = conf->crypto_xform;
2170
2171         /* Verify the session physical address is known */
2172         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2173         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2174                 QAT_LOG(ERR,
2175                         "Session physical address unknown. Bad memory pool.");
2176                 return -EINVAL;
2177         }
2178
2179         /* Set context descriptor physical address */
2180         session->cd_paddr = session_paddr +
2181                         offsetof(struct qat_sym_session, cd);
2182
2183         /* Get requested QAT command id - should be cipher */
2184         qat_cmd_id = qat_get_cmd_id(xform);
2185         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2186                 QAT_LOG(ERR, "Unsupported xform chain requested");
2187                 return -ENOTSUP;
2188         }
2189         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2190
2191         ret = qat_sym_session_configure_cipher(dev, xform, session);
2192         if (ret < 0)
2193                 return ret;
2194         qat_sym_session_finalize(session);
2195
2196         return 0;
2197 }
2198
2199 int
2200 qat_security_session_create(void *dev,
2201                                 struct rte_security_session_conf *conf,
2202                                 struct rte_security_session *sess,
2203                                 struct rte_mempool *mempool)
2204 {
2205         void *sess_private_data;
2206         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2207         struct qat_cryptodev_private *internals = cdev->data->dev_private;
2208         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
2209         struct qat_sym_session *sym_session = NULL;
2210         int ret;
2211
2212         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2213                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2214                 QAT_LOG(ERR, "Invalid security protocol");
2215                 return -EINVAL;
2216         }
2217
2218         if (rte_mempool_get(mempool, &sess_private_data)) {
2219                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2220                 return -ENOMEM;
2221         }
2222
2223         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2224                         sess_private_data);
2225         if (ret != 0) {
2226                 QAT_LOG(ERR, "Failed to configure session parameters");
2227                 /* Return session to mempool */
2228                 rte_mempool_put(mempool, sess_private_data);
2229                 return ret;
2230         }
2231
2232         set_sec_session_private_data(sess, sess_private_data);
2233         sym_session = (struct qat_sym_session *)sess_private_data;
2234         sym_session->dev_id = internals->dev_id;
2235
2236         return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
2237                         sess_private_data);
2238 }
2239
2240 int
2241 qat_security_session_destroy(void *dev __rte_unused,
2242                                  struct rte_security_session *sess)
2243 {
2244         void *sess_priv = get_sec_session_private_data(sess);
2245         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2246
2247         if (sess_priv) {
2248                 if (s->bpi_ctx)
2249                         bpi_cipher_ctx_free(s->bpi_ctx);
2250                 memset(s, 0, qat_sym_session_get_private_size(dev));
2251                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2252
2253                 set_sec_session_private_data(sess, NULL);
2254                 rte_mempool_put(sess_mp, sess_priv);
2255         }
2256         return 0;
2257 }
2258 #endif