net/tap: set BPF syscall ID for RISC-V
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2022 Intel Corporation
3  */
4
5 #define OPENSSL_API_COMPAT 0x10100000L
6
7 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
9 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
10 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
11
12 #include <rte_memcpy.h>
13 #include <rte_common.h>
14 #include <rte_spinlock.h>
15 #include <rte_byteorder.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_crypto_sym.h>
19 #ifdef RTE_LIB_SECURITY
20 #include <rte_security.h>
21 #endif
22
23 #include "qat_logs.h"
24 #include "qat_sym_session.h"
25 #include "qat_sym.h"
26
/*
 * Initial hash states for the SHA family, stored as the big-endian byte
 * encoding of each algorithm's initial hash words (H0..Hn).  Values are
 * taken from FIPS 180-2 and are used when pre-computing hash state
 * values for the hardware (see the OpenSSL includes above).
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
61
/* Set up the cipher part of the session's content descriptor from the
 * raw encryption key (defined later in this file).
 */
static int
qat_sym_cd_cipher_set(struct qat_sym_session *cd,
						const uint8_t *enckey,
						uint32_t enckeylen);

/* Set up the auth part of the session's content descriptor: key, AAD
 * length, digest size and generate/verify operation (defined later in
 * this file).
 */
static int
qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation);

/* Fill in the request header fields common to all session types. */
static void
qat_sym_session_init_common_hdr(struct qat_sym_session *session);
76
/* Req/cd init functions */

/* Finalize a fully-configured session by building the common request
 * header; called once all cipher/auth parameters have been set.
 */
static void
qat_sym_session_finalize(struct qat_sym_session *session)
{
	qat_sym_session_init_common_hdr(session);
}
84
85 /** Frees a context previously created
86  *  Depends on openssl libcrypto
87  */
88 static void
89 bpi_cipher_ctx_free(void *bpi_ctx)
90 {
91         if (bpi_ctx != NULL)
92                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
93 }
94
95 /** Creates a context in either AES or DES in ECB mode
96  *  Depends on openssl libcrypto
97  */
98 static int
99 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
100                 enum rte_crypto_cipher_operation direction __rte_unused,
101                 const uint8_t *key, uint16_t key_length, void **ctx)
102 {
103         const EVP_CIPHER *algo = NULL;
104         int ret;
105         *ctx = EVP_CIPHER_CTX_new();
106
107         if (*ctx == NULL) {
108                 ret = -ENOMEM;
109                 goto ctx_init_err;
110         }
111
112         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
113                 algo = EVP_des_ecb();
114         else
115                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
116                         algo = EVP_aes_128_ecb();
117                 else
118                         algo = EVP_aes_256_ecb();
119
120         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
121         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
122                 ret = -EINVAL;
123                 goto ctx_init_err;
124         }
125
126         return 0;
127
128 ctx_init_err:
129         if (*ctx != NULL)
130                 EVP_CIPHER_CTX_free(*ctx);
131         return ret;
132 }
133
134 static int
135 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
136                 struct qat_cryptodev_private *internals)
137 {
138         int i = 0;
139         const struct rte_cryptodev_capabilities *capability;
140
141         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
142                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
143                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
144                         continue;
145
146                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
147                         continue;
148
149                 if (capability->sym.cipher.algo == algo)
150                         return 1;
151         }
152         return 0;
153 }
154
155 static int
156 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
157                 struct qat_cryptodev_private *internals)
158 {
159         int i = 0;
160         const struct rte_cryptodev_capabilities *capability;
161
162         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
163                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
164                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
165                         continue;
166
167                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
168                         continue;
169
170                 if (capability->sym.auth.algo == algo)
171                         return 1;
172         }
173         return 0;
174 }
175
/* Clear a symmetric session: free the OpenSSL BPI context if one was
 * allocated, scrub the private data, detach it from the generic session
 * and return it to its mempool.
 */
void
qat_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		/* Zero key material before handing memory back to pool */
		memset(s, 0, qat_sym_session_get_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
194
195 static int
196 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
197 {
198         /* Cipher Only */
199         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
200                 return ICP_QAT_FW_LA_CMD_CIPHER;
201
202         /* Authentication Only */
203         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
204                 return ICP_QAT_FW_LA_CMD_AUTH;
205
206         /* AEAD */
207         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
208                 /* AES-GCM and AES-CCM works with different direction
209                  * GCM first encrypts and generate hash where AES-CCM
210                  * first generate hash and encrypts. Similar relation
211                  * applies to decryption.
212                  */
213                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
214                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
215                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
216                         else
217                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
218                 else
219                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
220                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
221                         else
222                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
223         }
224
225         if (xform->next == NULL)
226                 return -1;
227
228         /* Cipher then Authenticate */
229         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
230                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
231                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
232
233         /* Authenticate then Cipher */
234         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
235                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
236                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
237
238         return -1;
239 }
240
241 static struct rte_crypto_auth_xform *
242 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
243 {
244         do {
245                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
246                         return &xform->auth;
247
248                 xform = xform->next;
249         } while (xform);
250
251         return NULL;
252 }
253
254 static struct rte_crypto_cipher_xform *
255 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
256 {
257         do {
258                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
259                         return &xform->cipher;
260
261                 xform = xform->next;
262         } while (xform);
263
264         return NULL;
265 }
266
/**
 * Configure the cipher half of a QAT session from the CIPHER xform in
 * @xform: validate the key size, select the hardware algorithm and
 * mode, record IV offset/length and direction, then build the cipher
 * content descriptor.
 *
 * @param dev		cryptodev the session belongs to (used for
 *			capability checks and device-generation quirks)
 * @param xform		xform chain; the first CIPHER xform is used
 * @param session	QAT session being filled in
 * @return 0 on success; -EINVAL for bad keys/parameters, -ENOTSUP for
 * algorithms this device cannot handle.  On failure any BPI OpenSSL
 * context already created is freed again.
 */
int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
{
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	enum qat_device_gen qat_dev_gen =
				internals->qat_dev->qat_dev_gen;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	/* NOTE(review): cipher_xform is dereferenced unconditionally;
	 * callers must only reach here for chains containing a CIPHER
	 * xform (guaranteed by the qat_get_cmd_id() dispatch) — confirm.
	 */
	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		/* GEN4 devices require the UCS flag for CTR mode */
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		/* DOCSIS BPI needs an OpenSSL ECB context for runt
		 * (partial) block processing in software.
		 */
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		/* Same runt-block handling as DES DOCSIS BPI above */
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		/* ZUC is only present on some device generations; check
		 * the runtime capability table first.
		 */
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		/* XTS keys are double length: two equal-size AES keys */
		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
			QAT_LOG(ERR, "AES-XTS-192 not supported");
			ret = -EINVAL;
			goto error_out;
		}
		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		/* Known algorithms that QAT does not implement */
		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	/* Build the cipher content descriptor from the key */
	if (qat_sym_cd_cipher_set(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	/* Undo BPI context allocation done earlier in this function */
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}
459
460 int
461 qat_sym_session_configure(struct rte_cryptodev *dev,
462                 struct rte_crypto_sym_xform *xform,
463                 struct rte_cryptodev_sym_session *sess,
464                 struct rte_mempool *mempool)
465 {
466         void *sess_private_data;
467         int ret;
468
469         if (rte_mempool_get(mempool, &sess_private_data)) {
470                 CDEV_LOG_ERR(
471                         "Couldn't get object from session mempool");
472                 return -ENOMEM;
473         }
474
475         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
476         if (ret != 0) {
477                 QAT_LOG(ERR,
478                     "Crypto QAT PMD: failed to configure session parameters");
479
480                 /* Return session to mempool */
481                 rte_mempool_put(mempool, sess_private_data);
482                 return ret;
483         }
484
485         set_sym_session_private_data(sess, dev->driver_id,
486                 sess_private_data);
487
488         return 0;
489 }
490
/**
 * Populate a QAT session from the xform chain.
 *
 * Verifies the session object's IOVA, derives the QAT firmware command
 * id from the chain shape (cipher / auth / both / AEAD) and dispatches
 * to the cipher/auth/AEAD configure helpers in the order the hardware
 * will execute them, then builds the common header and hands the
 * session to the generation-specific backend.
 *
 * @param dev			cryptodev owning the session
 * @param xform			xform chain describing the operation
 * @param session_private	session private data (from the mempool)
 * @return 0 on success, negative errno-style value on failure
 */
int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_sym_session *session = session_private;
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
	int ret;
	int qat_cmd_id;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR,
			"Session physical address unknown. Bad memory pool.");
		return -EINVAL;
	}

	memset(session, 0, sizeof(*session));
	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	session->dev_id = internals->dev_id;
	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
	session->is_ucs = 0;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		if (ret < 0)
			return ret;
		/* AES-GMAC on GEN3 with the SPC-sized IV can run as a
		 * single-pass operation.
		 */
		session->is_single_pass_gmac =
			       qat_dev_gen == QAT_GEN3 &&
			       xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
			       xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		/* An AEAD xform configures both halves at once;
		 * otherwise configure cipher first, then auth, matching
		 * the hardware execution order.
		 */
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		/* Mirror of CIPHER_HASH: auth half first, then cipher */
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	/* Remaining firmware services are not exposed by this PMD */
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	default:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	}
	qat_sym_session_finalize(session);

	/* Let the device-generation backend finish session setup */
	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
			(void *)session);
}
596
597 static int
598 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
599                 const struct rte_crypto_aead_xform *aead_xform)
600 {
601         session->is_single_pass = 1;
602         session->is_auth = 1;
603         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
604         /* Chacha-Poly is special case that use QAT CTR mode */
605         if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
606                 session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
607         else
608                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
609
610         session->cipher_iv.offset = aead_xform->iv.offset;
611         session->cipher_iv.length = aead_xform->iv.length;
612         session->aad_len = aead_xform->aad_length;
613         session->digest_length = aead_xform->digest_length;
614
615         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
616                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
617                 session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
618         } else {
619                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
620                 session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
621         }
622
623         return 0;
624 }
625
626 int
627 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
628                                 struct rte_crypto_sym_xform *xform,
629                                 struct qat_sym_session *session)
630 {
631         struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
632         struct qat_cryptodev_private *internals = dev->data->dev_private;
633         const uint8_t *key_data = auth_xform->key.data;
634         uint8_t key_length = auth_xform->key.length;
635         enum qat_device_gen qat_dev_gen =
636                         internals->qat_dev->qat_dev_gen;
637
638         session->aes_cmac = 0;
639         session->auth_key_length = auth_xform->key.length;
640         session->auth_iv.offset = auth_xform->iv.offset;
641         session->auth_iv.length = auth_xform->iv.length;
642         session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
643         session->is_auth = 1;
644         session->digest_length = auth_xform->digest_length;
645
646         switch (auth_xform->algo) {
647         case RTE_CRYPTO_AUTH_SHA1:
648                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
649                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
650                 break;
651         case RTE_CRYPTO_AUTH_SHA224:
652                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
653                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
654                 break;
655         case RTE_CRYPTO_AUTH_SHA256:
656                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
657                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
658                 break;
659         case RTE_CRYPTO_AUTH_SHA384:
660                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
661                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
662                 break;
663         case RTE_CRYPTO_AUTH_SHA512:
664                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
665                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
666                 break;
667         case RTE_CRYPTO_AUTH_SHA1_HMAC:
668                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
669                 break;
670         case RTE_CRYPTO_AUTH_SHA224_HMAC:
671                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
672                 break;
673         case RTE_CRYPTO_AUTH_SHA256_HMAC:
674                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
675                 break;
676         case RTE_CRYPTO_AUTH_SHA384_HMAC:
677                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
678                 break;
679         case RTE_CRYPTO_AUTH_SHA512_HMAC:
680                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
681                 break;
682         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
683                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
684                 break;
685         case RTE_CRYPTO_AUTH_AES_CMAC:
686                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
687                 session->aes_cmac = 1;
688                 break;
689         case RTE_CRYPTO_AUTH_AES_GMAC:
690                 if (qat_sym_validate_aes_key(auth_xform->key.length,
691                                 &session->qat_cipher_alg) != 0) {
692                         QAT_LOG(ERR, "Invalid AES key size");
693                         return -EINVAL;
694                 }
695                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
696                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
697                 if (session->auth_iv.length == 0)
698                         session->auth_iv.length = AES_GCM_J0_LEN;
699                 else
700                         session->is_iv12B = 1;
701                 if (qat_dev_gen == QAT_GEN4) {
702                         session->is_cnt_zero = 1;
703                         session->is_ucs = 1;
704                 }
705                 break;
706         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
707                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
708                 break;
709         case RTE_CRYPTO_AUTH_MD5_HMAC:
710                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
711                 break;
712         case RTE_CRYPTO_AUTH_NULL:
713                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
714                 break;
715         case RTE_CRYPTO_AUTH_KASUMI_F9:
716                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
717                 break;
718         case RTE_CRYPTO_AUTH_ZUC_EIA3:
719                 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
720                         QAT_LOG(ERR, "%s not supported on this device",
721                                 rte_crypto_auth_algorithm_strings
722                                 [auth_xform->algo]);
723                         return -ENOTSUP;
724                 }
725                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
726                 break;
727         case RTE_CRYPTO_AUTH_MD5:
728         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
729                 QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
730                                 auth_xform->algo);
731                 return -ENOTSUP;
732         default:
733                 QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
734                                 auth_xform->algo);
735                 return -EINVAL;
736         }
737
738         if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
739                 session->is_gmac = 1;
740                 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
741                         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
742                         session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
743                         /*
744                          * It needs to create cipher desc content first,
745                          * then authentication
746                          */
747                         if (qat_sym_cd_cipher_set(session,
748                                                 auth_xform->key.data,
749                                                 auth_xform->key.length))
750                                 return -EINVAL;
751
752                         if (qat_sym_cd_auth_set(session,
753                                                 key_data,
754                                                 key_length,
755                                                 0,
756                                                 auth_xform->digest_length,
757                                                 auth_xform->op))
758                                 return -EINVAL;
759                 } else {
760                         session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
761                         session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
762                         /*
763                          * It needs to create authentication desc content first,
764                          * then cipher
765                          */
766
767                         if (qat_sym_cd_auth_set(session,
768                                         key_data,
769                                         key_length,
770                                         0,
771                                         auth_xform->digest_length,
772                                         auth_xform->op))
773                                 return -EINVAL;
774
775                         if (qat_sym_cd_cipher_set(session,
776                                                 auth_xform->key.data,
777                                                 auth_xform->key.length))
778                                 return -EINVAL;
779                 }
780         } else {
781                 if (qat_sym_cd_auth_set(session,
782                                 key_data,
783                                 key_length,
784                                 0,
785                                 auth_xform->digest_length,
786                                 auth_xform->op))
787                         return -EINVAL;
788         }
789
790         return 0;
791 }
792
793 int
794 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
795                                 struct rte_crypto_sym_xform *xform,
796                                 struct qat_sym_session *session)
797 {
798         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
799         enum rte_crypto_auth_operation crypto_operation;
800         struct qat_cryptodev_private *internals =
801                         dev->data->dev_private;
802         enum qat_device_gen qat_dev_gen =
803                         internals->qat_dev->qat_dev_gen;
804
805         /*
806          * Store AEAD IV parameters as cipher IV,
807          * to avoid unnecessary memory usage
808          */
809         session->cipher_iv.offset = xform->aead.iv.offset;
810         session->cipher_iv.length = xform->aead.iv.length;
811
812         session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
813         session->is_auth = 1;
814         session->digest_length = aead_xform->digest_length;
815
816         session->is_single_pass = 0;
817         switch (aead_xform->algo) {
818         case RTE_CRYPTO_AEAD_AES_GCM:
819                 if (qat_sym_validate_aes_key(aead_xform->key.length,
820                                 &session->qat_cipher_alg) != 0) {
821                         QAT_LOG(ERR, "Invalid AES key size");
822                         return -EINVAL;
823                 }
824                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
825                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
826
827                 if (qat_dev_gen == QAT_GEN4)
828                         session->is_ucs = 1;
829                 if (session->cipher_iv.length == 0) {
830                         session->cipher_iv.length = AES_GCM_J0_LEN;
831                         break;
832                 }
833                 session->is_iv12B = 1;
834                 if (qat_dev_gen < QAT_GEN3)
835                         break;
836                 qat_sym_session_handle_single_pass(session,
837                                 aead_xform);
838                 break;
839         case RTE_CRYPTO_AEAD_AES_CCM:
840                 if (qat_sym_validate_aes_key(aead_xform->key.length,
841                                 &session->qat_cipher_alg) != 0) {
842                         QAT_LOG(ERR, "Invalid AES key size");
843                         return -EINVAL;
844                 }
845                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
846                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
847                 if (qat_dev_gen == QAT_GEN4)
848                         session->is_ucs = 1;
849                 break;
850         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
851                 if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
852                         return -EINVAL;
853                 if (qat_dev_gen == QAT_GEN4)
854                         session->is_ucs = 1;
855                 session->qat_cipher_alg =
856                                 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
857                 qat_sym_session_handle_single_pass(session,
858                                                 aead_xform);
859                 break;
860         default:
861                 QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
862                                 aead_xform->algo);
863                 return -EINVAL;
864         }
865
866         if (session->is_single_pass) {
867                 if (qat_sym_cd_cipher_set(session,
868                                 aead_xform->key.data, aead_xform->key.length))
869                         return -EINVAL;
870         } else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
871                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
872                         (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
873                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
874                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
875                 /*
876                  * It needs to create cipher desc content first,
877                  * then authentication
878                  */
879                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
880                         RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
881
882                 if (qat_sym_cd_cipher_set(session,
883                                         aead_xform->key.data,
884                                         aead_xform->key.length))
885                         return -EINVAL;
886
887                 if (qat_sym_cd_auth_set(session,
888                                         aead_xform->key.data,
889                                         aead_xform->key.length,
890                                         aead_xform->aad_length,
891                                         aead_xform->digest_length,
892                                         crypto_operation))
893                         return -EINVAL;
894         } else {
895                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
896                 /*
897                  * It needs to create authentication desc content first,
898                  * then cipher
899                  */
900
901                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
902                         RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
903
904                 if (qat_sym_cd_auth_set(session,
905                                         aead_xform->key.data,
906                                         aead_xform->key.length,
907                                         aead_xform->aad_length,
908                                         aead_xform->digest_length,
909                                         crypto_operation))
910                         return -EINVAL;
911
912                 if (qat_sym_cd_cipher_set(session,
913                                         aead_xform->key.data,
914                                         aead_xform->key.length))
915                         return -EINVAL;
916         }
917
918         return 0;
919 }
920
921 unsigned int qat_sym_session_get_private_size(
922                 struct rte_cryptodev *dev __rte_unused)
923 {
924         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
925 }
926
927 /* returns block size in bytes per cipher algo */
928 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
929 {
930         switch (qat_cipher_alg) {
931         case ICP_QAT_HW_CIPHER_ALGO_DES:
932                 return ICP_QAT_HW_DES_BLK_SZ;
933         case ICP_QAT_HW_CIPHER_ALGO_3DES:
934                 return ICP_QAT_HW_3DES_BLK_SZ;
935         case ICP_QAT_HW_CIPHER_ALGO_AES128:
936         case ICP_QAT_HW_CIPHER_ALGO_AES192:
937         case ICP_QAT_HW_CIPHER_ALGO_AES256:
938                 return ICP_QAT_HW_AES_BLK_SZ;
939         default:
940                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
941                 return -EFAULT;
942         };
943         return -EFAULT;
944 }
945
946 /*
947  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
948  * This is digest size rounded up to nearest quadword
949  */
950 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
951 {
952         switch (qat_hash_alg) {
953         case ICP_QAT_HW_AUTH_ALGO_SHA1:
954                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
955                                                 QAT_HW_DEFAULT_ALIGNMENT);
956         case ICP_QAT_HW_AUTH_ALGO_SHA224:
957                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
958                                                 QAT_HW_DEFAULT_ALIGNMENT);
959         case ICP_QAT_HW_AUTH_ALGO_SHA256:
960                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
961                                                 QAT_HW_DEFAULT_ALIGNMENT);
962         case ICP_QAT_HW_AUTH_ALGO_SHA384:
963                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
964                                                 QAT_HW_DEFAULT_ALIGNMENT);
965         case ICP_QAT_HW_AUTH_ALGO_SHA512:
966                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
967                                                 QAT_HW_DEFAULT_ALIGNMENT);
968         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
969                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
970                                                 QAT_HW_DEFAULT_ALIGNMENT);
971         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
972         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
973                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
974                                                 QAT_HW_DEFAULT_ALIGNMENT);
975         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
976                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
977                                                 QAT_HW_DEFAULT_ALIGNMENT);
978         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
979                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
980                                                 QAT_HW_DEFAULT_ALIGNMENT);
981         case ICP_QAT_HW_AUTH_ALGO_MD5:
982                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
983                                                 QAT_HW_DEFAULT_ALIGNMENT);
984         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
985                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
986                                                 QAT_HW_DEFAULT_ALIGNMENT);
987         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
988                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
989                                                 QAT_HW_DEFAULT_ALIGNMENT);
990         case ICP_QAT_HW_AUTH_ALGO_NULL:
991                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
992                                                 QAT_HW_DEFAULT_ALIGNMENT);
993         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
994                 /* return maximum state1 size in this case */
995                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
996                                                 QAT_HW_DEFAULT_ALIGNMENT);
997         default:
998                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
999                 return -EFAULT;
1000         };
1001         return -EFAULT;
1002 }
1003
1004 /* returns digest size in bytes  per hash algo */
1005 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1006 {
1007         switch (qat_hash_alg) {
1008         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1009                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1010         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1011                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1012         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1013                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1014         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1015                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1016         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1017                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1018         case ICP_QAT_HW_AUTH_ALGO_MD5:
1019                 return ICP_QAT_HW_MD5_STATE1_SZ;
1020         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1021                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1022         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1023                 /* return maximum digest size in this case */
1024                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1025         default:
1026                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1027                 return -EFAULT;
1028         };
1029         return -EFAULT;
1030 }
1031
1032 /* returns block size in byes per hash algo */
1033 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1034 {
1035         switch (qat_hash_alg) {
1036         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1037                 return SHA_CBLOCK;
1038         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1039                 return SHA256_CBLOCK;
1040         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1041                 return SHA256_CBLOCK;
1042         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1043                 return SHA512_CBLOCK;
1044         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1045                 return SHA512_CBLOCK;
1046         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1047                 return 16;
1048         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1049                 return ICP_QAT_HW_AES_BLK_SZ;
1050         case ICP_QAT_HW_AUTH_ALGO_MD5:
1051                 return MD5_CBLOCK;
1052         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1053                 /* return maximum block size in this case */
1054                 return SHA512_CBLOCK;
1055         default:
1056                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1057                 return -EFAULT;
1058         };
1059         return -EFAULT;
1060 }
1061
1062 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1063 {
1064         SHA_CTX ctx;
1065
1066         if (!SHA1_Init(&ctx))
1067                 return -EFAULT;
1068         SHA1_Transform(&ctx, data_in);
1069         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1070         return 0;
1071 }
1072
1073 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1074 {
1075         SHA256_CTX ctx;
1076
1077         if (!SHA224_Init(&ctx))
1078                 return -EFAULT;
1079         SHA256_Transform(&ctx, data_in);
1080         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1081         return 0;
1082 }
1083
1084 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1085 {
1086         SHA256_CTX ctx;
1087
1088         if (!SHA256_Init(&ctx))
1089                 return -EFAULT;
1090         SHA256_Transform(&ctx, data_in);
1091         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1092         return 0;
1093 }
1094
1095 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1096 {
1097         SHA512_CTX ctx;
1098
1099         if (!SHA384_Init(&ctx))
1100                 return -EFAULT;
1101         SHA512_Transform(&ctx, data_in);
1102         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1103         return 0;
1104 }
1105
1106 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1107 {
1108         SHA512_CTX ctx;
1109
1110         if (!SHA512_Init(&ctx))
1111                 return -EFAULT;
1112         SHA512_Transform(&ctx, data_in);
1113         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1114         return 0;
1115 }
1116
1117 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1118 {
1119         MD5_CTX ctx;
1120
1121         if (!MD5_Init(&ctx))
1122                 return -EFAULT;
1123         MD5_Transform(&ctx, data_in);
1124         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1125
1126         return 0;
1127 }
1128
/*
 * Compute a partial-hash precompute: run a single compression round of
 * @hash_alg over one input block (e.g. an HMAC ipad/opad block) and write
 * the resulting state words to @data_out in the byte order the hardware
 * expects.  Returns 0 on success, -EFAULT on an unknown algorithm or a
 * failing partial-hash helper.
 */
static int
partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
		uint8_t *data_in, uint8_t *data_out)
{
	int digest_size;
	/* Scratch sized by the largest digest (the DELIMITER entry) */
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	/* Initialize to avoid gcc warning */
	memset(digest, 0, sizeof(digest));

	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	/* SHA-1/224/256 use 32-bit state words, SHA-384/512 use 64-bit */
	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		/* Byte-swap each 32-bit state word into data_out */
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		/* Byte-swap each 64-bit state word into data_out */
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 state needs no byte-swapping: write straight out */
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
1197 #define HMAC_IPAD_VALUE 0x36
1198 #define HMAC_OPAD_VALUE 0x5c
1199 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1200
1201 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1202
1203 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1204 {
1205         int i;
1206
1207         derived[0] = base[0] << 1;
1208         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1209                 derived[i] = base[i] << 1;
1210                 derived[i - 1] |= base[i] >> 7;
1211         }
1212
1213         if (base[0] & 0x80)
1214                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1215 }
1216
1217 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1218                                 const uint8_t *auth_key,
1219                                 uint16_t auth_keylen,
1220                                 uint8_t *p_state_buf,
1221                                 uint16_t *p_state_len,
1222                                 uint8_t aes_cmac)
1223 {
1224         int block_size;
1225         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1226         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1227         int i;
1228
1229         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1230
1231                 /* CMAC */
1232                 if (aes_cmac) {
1233                         AES_KEY enc_key;
1234                         uint8_t *in = NULL;
1235                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1236                         uint8_t *k1, *k2;
1237
1238                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1239
1240                         in = rte_zmalloc("AES CMAC K1",
1241                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1242
1243                         if (in == NULL) {
1244                                 QAT_LOG(ERR, "Failed to alloc memory");
1245                                 return -ENOMEM;
1246                         }
1247
1248                         rte_memcpy(in, AES_CMAC_SEED,
1249                                    ICP_QAT_HW_AES_128_KEY_SZ);
1250                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1251
1252                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1253                                 &enc_key) != 0) {
1254                                 rte_free(in);
1255                                 return -EFAULT;
1256                         }
1257
1258                         AES_encrypt(in, k0, &enc_key);
1259
1260                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1261                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1262
1263                         aes_cmac_key_derive(k0, k1);
1264                         aes_cmac_key_derive(k1, k2);
1265
1266                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1267                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1268                         rte_free(in);
1269                         return 0;
1270                 } else {
1271                         static uint8_t qat_aes_xcbc_key_seed[
1272                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1273                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1274                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1275                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1276                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1277                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1278                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1279                         };
1280
1281                         uint8_t *in = NULL;
1282                         uint8_t *out = p_state_buf;
1283                         int x;
1284                         AES_KEY enc_key;
1285
1286                         in = rte_zmalloc("working mem for key",
1287                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1288                         if (in == NULL) {
1289                                 QAT_LOG(ERR, "Failed to alloc memory");
1290                                 return -ENOMEM;
1291                         }
1292
1293                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1294                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1295                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1296                                 if (AES_set_encrypt_key(auth_key,
1297                                                         auth_keylen << 3,
1298                                                         &enc_key) != 0) {
1299                                         rte_free(in -
1300                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1301                                         memset(out -
1302                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1303                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1304                                         return -EFAULT;
1305                                 }
1306                                 AES_encrypt(in, out, &enc_key);
1307                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1308                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1309                         }
1310                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1311                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1312                         return 0;
1313                 }
1314
1315         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1316                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1317                 uint8_t *in = NULL;
1318                 uint8_t *out = p_state_buf;
1319                 AES_KEY enc_key;
1320
1321                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1322                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1323                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1324                 in = rte_zmalloc("working mem for key",
1325                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1326                 if (in == NULL) {
1327                         QAT_LOG(ERR, "Failed to alloc memory");
1328                         return -ENOMEM;
1329                 }
1330
1331                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1332                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1333                         &enc_key) != 0) {
1334                         return -EFAULT;
1335                 }
1336                 AES_encrypt(in, out, &enc_key);
1337                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1338                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1339                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1340                 rte_free(in);
1341                 return 0;
1342         }
1343
1344         block_size = qat_hash_get_block_size(hash_alg);
1345         if (block_size < 0)
1346                 return block_size;
1347         /* init ipad and opad from key and xor with fixed values */
1348         memset(ipad, 0, block_size);
1349         memset(opad, 0, block_size);
1350
1351         if (auth_keylen > (unsigned int)block_size) {
1352                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1353                 return -EFAULT;
1354         }
1355         rte_memcpy(ipad, auth_key, auth_keylen);
1356         rte_memcpy(opad, auth_key, auth_keylen);
1357
1358         for (i = 0; i < block_size; i++) {
1359                 uint8_t *ipad_ptr = ipad + i;
1360                 uint8_t *opad_ptr = opad + i;
1361                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1362                 *opad_ptr ^= HMAC_OPAD_VALUE;
1363         }
1364
1365         /* do partial hash of ipad and copy to state1 */
1366         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1367                 memset(ipad, 0, block_size);
1368                 memset(opad, 0, block_size);
1369                 QAT_LOG(ERR, "ipad precompute failed");
1370                 return -EFAULT;
1371         }
1372
1373         /*
1374          * State len is a multiple of 8, so may be larger than the digest.
1375          * Put the partial hash of opad state_len bytes after state1
1376          */
1377         *p_state_len = qat_hash_get_state1_size(hash_alg);
1378         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1379                 memset(ipad, 0, block_size);
1380                 memset(opad, 0, block_size);
1381                 QAT_LOG(ERR, "opad precompute failed");
1382                 return -EFAULT;
1383         }
1384
1385         /*  don't leave data lying around */
1386         memset(ipad, 0, block_size);
1387         memset(opad, 0, block_size);
1388         return 0;
1389 }
1390
1391 static void
1392 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
1393 {
1394         struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1395         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1396         enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
1397         uint32_t slice_flags = session->slice_types;
1398
1399         header->hdr_flags =
1400                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1401         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1402         header->service_cmd_id = session->qat_cmd;
1403         header->comn_req_flags =
1404                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1405                                         QAT_COMN_PTR_TYPE_FLAT);
1406         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1407                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1408         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1409                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1410
1411         switch (proto_flags)            {
1412         case QAT_CRYPTO_PROTO_FLAG_NONE:
1413                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1414                                         ICP_QAT_FW_LA_NO_PROTO);
1415                 break;
1416         case QAT_CRYPTO_PROTO_FLAG_CCM:
1417                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1418                                         ICP_QAT_FW_LA_CCM_PROTO);
1419                 break;
1420         case QAT_CRYPTO_PROTO_FLAG_GCM:
1421                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1422                                         ICP_QAT_FW_LA_GCM_PROTO);
1423                 break;
1424         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1425                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1426                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1427                 break;
1428         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1429                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1430                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1431                 break;
1432         }
1433
1434         /* More than one of the following flags can be set at once */
1435         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
1436                 ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
1437                         header->serv_specif_flags,
1438                         ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
1439         }
1440         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
1441                 ICP_QAT_FW_LA_SLICE_TYPE_SET(
1442                         header->serv_specif_flags,
1443                         ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
1444         }
1445
1446         if (session->is_auth) {
1447                 if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1448                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1449                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1450                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1451                                         ICP_QAT_FW_LA_CMP_AUTH_RES);
1452                 } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
1453                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1454                                                 ICP_QAT_FW_LA_RET_AUTH_RES);
1455                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1456                                                 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1457                 }
1458         } else {
1459                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1460                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1461                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1462                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1463         }
1464
1465         if (session->is_iv12B) {
1466                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1467                         header->serv_specif_flags,
1468                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1469         }
1470
1471         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1472                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1473         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1474                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1475 }
1476
/*
 * Populate the cipher section of the session's content descriptor (CD)
 * and the cipher-related fields of the firmware request template.
 *
 * Selects the CD slice chain for CIPHER / CIPHER_HASH commands, decides
 * whether the hardware must convert (expand) the key, lays out the
 * cipher config word(s) plus key material at cdesc->cd_cur_ptr, and
 * records the resulting sizes/offsets in the request header. All sizes
 * and offsets passed to firmware are in 8-byte quad-words (hence >> 3).
 *
 * @param cdesc        session / content descriptor being built
 * @param cipherkey    cipher key bytes supplied by the application
 * @param cipherkeylen length of cipherkey in bytes
 * @return 0 on success, -EFAULT if the session command is not a cipher one
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	/* cd_ctrl is shared storage; viewed as cipher or auth ctrl header */
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	/* Two views of the same request-parameter area (UCS vs legacy) */
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	/*
	 * Set up the CD slice chain for this command. For HASH_CIPHER the
	 * chain (and cd_cur_ptr) is set up by qat_sym_cd_auth_set instead.
	 */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		/* Cipher-only: neither return nor compare an auth result */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	/*
	 * Decide whether the HW key-conversion (expansion) step is needed.
	 */
	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set.
		 * Chacha20-Poly1305 is special case, CTR but single-pass
		 * so both direction need to be used.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
				cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		}
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/*
	 * Per-algorithm key/state sizes. For SNOW 3G and ZUC the IV is
	 * stored alongside the key, so it is counted into total_key_size.
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
	} else {
		/* Default (AES-family and others): key as supplied */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	}
	/* Offset of the cipher config within the CD, in quad-words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	/*
	 * Write the key material after the config word and advance
	 * cd_cur_ptr past everything written.
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 needs key followed by key^modifier */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		/* UCS slice: key padded up to a 16-byte multiple */
		const uint8_t *final_key = cipherkey;

		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (cdesc->is_single_pass) {
		/* Encode the digest length into the cipher config word */
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
					cdesc->aad_len);
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
	}

	/* Pad the key area up to the size the HW expects */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	if (cdesc->is_ucs) {
		/*
		 * These values match in terms of position auth
		 * slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
		}
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;
	}
	/* Final CD size and key size, rounded to quad-words for FW */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1676
/*
 * Populate the auth section of the session's content descriptor (CD)
 * and the hash-related fields of the firmware request template.
 *
 * Sets the CD slice chain for AUTH / HASH_CIPHER commands, builds the
 * inner hash config word, and writes the algorithm-specific state1 /
 * state2 blocks (HMAC precomputes, GCM hash key, per-algorithm key
 * material) at cdesc->cd_cur_ptr. Sizes/offsets passed to firmware are
 * in 8-byte quad-words (hence >> 3).
 *
 * @param cdesc      session / content descriptor being built
 * @param authkey    authentication key bytes
 * @param authkeylen length of authkey in bytes
 * @param aad_length additional authenticated data length (AEAD modes)
 * @param digestsize digest length in bytes
 * @param operation  RTE_CRYPTO_AUTH_OP_* requested by the application
 * @return 0 on success, negative errno-style value on failure
 */
int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	void *ptr = &req_tmpl->cd_ctrl;
	/* cd_ctrl is shared storage; viewed as cipher or auth ctrl header */
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex  = 0;
	uint32_t *pTempKey;

	/*
	 * Set up the CD slice chain for this command. For CIPHER_HASH the
	 * chain (and cd_cur_ptr) is set up by qat_sym_cd_cipher_set instead.
	 */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		QAT_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	else
		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
				cdesc->qat_hash_alg, digestsize);

	/*
	 * Listed algorithms/modes keep the byte counter at zero; the
	 * remainder start it at the block size (stored big-endian).
	 */
	if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
		|| cdesc->is_cnt_zero
			)
		hash->auth_counter.counter = 0;
	else {
		int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);

		if (block_size < 0)
			return block_size;
		hash->auth_counter.counter = rte_bswap32(block_size);
	}

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-1 */
			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
					sizeof(sha1InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-1 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-224 */
			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
					sizeof(sha224InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-224 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-256 */
			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
					sizeof(sha256InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-256 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
			authkeylen, cdesc->cd_cur_ptr,  &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-384 */
			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
					sizeof(sha384InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-384 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
			/* Plain SHA-512 */
			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
					sizeof(sha512InitialState));
			state1_size = qat_hash_get_state1_size(
					cdesc->qat_hash_alg);
			break;
		}
		/* SHA-512 HMAC */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
			authkeylen, cdesc->cd_cur_ptr,  &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		/* Shared path for AES-XCBC-MAC and AES-CMAC (aes_cmac flag) */
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

		if (cdesc->aes_cmac)
			memset(cdesc->cd_cur_ptr, 0, state1_size);
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size, cdesc->aes_cmac)) {
			cdesc->aes_cmac ? QAT_LOG(ERR,
						  "(CMAC)precompute failed")
					: QAT_LOG(ERR,
						  "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		/* GCM/GMAC: precompute writes the hash key into state2 */
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
			authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size, cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(aad_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(aad_length);
		cdesc->aad_len = aad_length;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		/*
		 * Append a SNOW 3G UEA2 cipher config + key (and zeroed IV
		 * area) after state2; counted via cd_extra_size.
		 */
		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		/* ZUC EIA3 always runs in MODE0; rebuild the config word */
		hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
				cdesc->qat_hash_alg, digestsize);
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);

		/* Key lives in state2; IV area follows it (cd_extra_size) */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;

		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		/* MD5 HMAC precompute */
		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
			authkeylen, cdesc->cd_cur_ptr, &state1_size,
			cdesc->aes_cmac)) {
			QAT_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_NULL);
		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		/* AES-CBC-MAC is used for CCM */
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;

		/* Account for the CCM B0 block and AAD length encoding */
		if (aad_length > 0) {
			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
			ICP_QAT_HW_CCM_AAD_LEN_INFO;
			auth_param->u2.aad_sz =
			RTE_ALIGN_CEIL(aad_length,
			ICP_QAT_HW_CCM_AAD_ALIGNMENT);
		} else {
			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
		}
		cdesc->aad_len = aad_length;
		hash->auth_counter.counter = 0;

		hash_cd_ctrl->outer_prefix_sz = digestsize;
		auth_param->hash_state_sz = digestsize;

		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
							+ authkeylen);
		/*
		* The Inner Hash Initial State2 block must contain IK
		* (Initialisation Key), followed by IK XOR-ed with KM
		* (Key Modifier): IK||(IK^KM).
		*/
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	/* state2 follows state1, which is padded to an 8-byte boundary */
	hash_cd_ctrl->inner_state2_sz  = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	/* Advance past everything written and record the final CD size */
	cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
2012
2013 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2014 {
2015         switch (key_len) {
2016         case ICP_QAT_HW_AES_128_KEY_SZ:
2017                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2018                 break;
2019         case ICP_QAT_HW_AES_192_KEY_SZ:
2020                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2021                 break;
2022         case ICP_QAT_HW_AES_256_KEY_SZ:
2023                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2024                 break;
2025         default:
2026                 return -EINVAL;
2027         }
2028         return 0;
2029 }
2030
2031 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2032                 enum icp_qat_hw_cipher_algo *alg)
2033 {
2034         switch (key_len) {
2035         case ICP_QAT_HW_AES_128_KEY_SZ:
2036                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2037                 break;
2038         case ICP_QAT_HW_AES_256_KEY_SZ:
2039                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2040                 break;
2041         default:
2042                 return -EINVAL;
2043         }
2044         return 0;
2045 }
2046
2047 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2048 {
2049         switch (key_len) {
2050         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2051                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2052                 break;
2053         default:
2054                 return -EINVAL;
2055         }
2056         return 0;
2057 }
2058
2059 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2060 {
2061         switch (key_len) {
2062         case ICP_QAT_HW_KASUMI_KEY_SZ:
2063                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2064                 break;
2065         default:
2066                 return -EINVAL;
2067         }
2068         return 0;
2069 }
2070
2071 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2072 {
2073         switch (key_len) {
2074         case ICP_QAT_HW_DES_KEY_SZ:
2075                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2076                 break;
2077         default:
2078                 return -EINVAL;
2079         }
2080         return 0;
2081 }
2082
2083 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2084 {
2085         switch (key_len) {
2086         case QAT_3DES_KEY_SZ_OPT1:
2087         case QAT_3DES_KEY_SZ_OPT2:
2088         case QAT_3DES_KEY_SZ_OPT3:
2089                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2090                 break;
2091         default:
2092                 return -EINVAL;
2093         }
2094         return 0;
2095 }
2096
2097 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2098 {
2099         switch (key_len) {
2100         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2101                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2102                 break;
2103         default:
2104                 return -EINVAL;
2105         }
2106         return 0;
2107 }
2108
2109 #ifdef RTE_LIB_SECURITY
2110 static int
2111 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2112 {
2113         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2114         struct rte_security_docsis_xform *docsis = &conf->docsis;
2115
2116         /* CRC generate -> Cipher encrypt */
2117         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2118
2119                 if (crypto_sym != NULL &&
2120                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2121                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2122                     crypto_sym->cipher.algo ==
2123                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2124                     (crypto_sym->cipher.key.length ==
2125                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2126                      crypto_sym->cipher.key.length ==
2127                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2128                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2129                     crypto_sym->next == NULL) {
2130                         return 0;
2131                 }
2132         /* Cipher decrypt -> CRC verify */
2133         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2134
2135                 if (crypto_sym != NULL &&
2136                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2137                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2138                     crypto_sym->cipher.algo ==
2139                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2140                     (crypto_sym->cipher.key.length ==
2141                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2142                      crypto_sym->cipher.key.length ==
2143                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2144                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2145                     crypto_sym->next == NULL) {
2146                         return 0;
2147                 }
2148         }
2149
2150         return -EINVAL;
2151 }
2152
2153 static int
2154 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2155                 struct rte_security_session_conf *conf, void *session_private)
2156 {
2157         int ret;
2158         int qat_cmd_id;
2159         struct rte_crypto_sym_xform *xform = NULL;
2160         struct qat_sym_session *session = session_private;
2161
2162         /* Clear the session */
2163         memset(session, 0, qat_sym_session_get_private_size(dev));
2164
2165         ret = qat_sec_session_check_docsis(conf);
2166         if (ret) {
2167                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2168                 return ret;
2169         }
2170
2171         xform = conf->crypto_xform;
2172
2173         /* Verify the session physical address is known */
2174         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2175         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2176                 QAT_LOG(ERR,
2177                         "Session physical address unknown. Bad memory pool.");
2178                 return -EINVAL;
2179         }
2180
2181         /* Set context descriptor physical address */
2182         session->cd_paddr = session_paddr +
2183                         offsetof(struct qat_sym_session, cd);
2184
2185         /* Get requested QAT command id - should be cipher */
2186         qat_cmd_id = qat_get_cmd_id(xform);
2187         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2188                 QAT_LOG(ERR, "Unsupported xform chain requested");
2189                 return -ENOTSUP;
2190         }
2191         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2192
2193         ret = qat_sym_session_configure_cipher(dev, xform, session);
2194         if (ret < 0)
2195                 return ret;
2196         qat_sym_session_finalize(session);
2197
2198         return 0;
2199 }
2200
2201 int
2202 qat_security_session_create(void *dev,
2203                                 struct rte_security_session_conf *conf,
2204                                 struct rte_security_session *sess,
2205                                 struct rte_mempool *mempool)
2206 {
2207         void *sess_private_data;
2208         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2209         struct qat_cryptodev_private *internals = cdev->data->dev_private;
2210         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
2211         struct qat_sym_session *sym_session = NULL;
2212         int ret;
2213
2214         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2215                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2216                 QAT_LOG(ERR, "Invalid security protocol");
2217                 return -EINVAL;
2218         }
2219
2220         if (rte_mempool_get(mempool, &sess_private_data)) {
2221                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2222                 return -ENOMEM;
2223         }
2224
2225         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2226                         sess_private_data);
2227         if (ret != 0) {
2228                 QAT_LOG(ERR, "Failed to configure session parameters");
2229                 /* Return session to mempool */
2230                 rte_mempool_put(mempool, sess_private_data);
2231                 return ret;
2232         }
2233
2234         set_sec_session_private_data(sess, sess_private_data);
2235         sym_session = (struct qat_sym_session *)sess_private_data;
2236         sym_session->dev_id = internals->dev_id;
2237
2238         return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
2239                         sess_private_data);
2240 }
2241
2242 int
2243 qat_security_session_destroy(void *dev __rte_unused,
2244                                  struct rte_security_session *sess)
2245 {
2246         void *sess_priv = get_sec_session_private_data(sess);
2247         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2248
2249         if (sess_priv) {
2250                 if (s->bpi_ctx)
2251                         bpi_cipher_ctx_free(s->bpi_ctx);
2252                 memset(s, 0, qat_sym_session_get_private_size(dev));
2253                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2254
2255                 set_sec_session_private_data(sess, NULL);
2256                 rte_mempool_put(sess_mp, sess_priv);
2257         }
2258         return 0;
2259 }
2260 #endif