crypto/qat: use intel-ipsec-mb for partial hash and AES
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2022 Intel Corporation
3  */
4
5 #define OPENSSL_API_COMPAT 0x10100000L
6
7 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
9 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
10 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
11
12 #ifdef RTE_QAT_LIBIPSECMB
13 #include <intel-ipsec-mb.h>
14 #endif
15
16 #include <rte_memcpy.h>
17 #include <rte_common.h>
18 #include <rte_spinlock.h>
19 #include <rte_byteorder.h>
20 #include <rte_log.h>
21 #include <rte_malloc.h>
22 #include <rte_crypto_sym.h>
23 #ifdef RTE_LIB_SECURITY
24 #include <rte_security.h>
25 #endif
26
27 #include "qat_logs.h"
28 #include "qat_sym_session.h"
29 #include "qat_sym.h"
30
31 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
32 #include <openssl/provider.h>
33 #endif
34
35 extern int qat_ipsec_mb_lib;
36
/* Initial hash values (H0..Hn) as big-endian byte streams; used when
 * seeding hash-state precomputation. NOTE(review): consumers appear to
 * be the partial-hash precompute helpers — confirm outside this chunk.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
72 static int
73 qat_sym_cd_cipher_set(struct qat_sym_session *cd,
74                                                 const uint8_t *enckey,
75                                                 uint32_t enckeylen);
76
77 static int
78 qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
79                                                 const uint8_t *authkey,
80                                                 uint32_t authkeylen,
81                                                 uint32_t aad_length,
82                                                 uint32_t digestsize,
83                                                 unsigned int operation);
84 static void
85 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
86
87 /* Req/cd init functions */
88
/* Final session-setup step: populate the common request header once the
 * per-algorithm fields of the session have been configured.
 */
static void
qat_sym_session_finalize(struct qat_sym_session *session)
{
	qat_sym_session_init_common_hdr(session);
}
94
95 /** Frees a context previously created
96  *  Depends on openssl libcrypto
97  */
98 static void
99 bpi_cipher_ctx_free(void *bpi_ctx)
100 {
101         if (bpi_ctx != NULL)
102                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
103 }
104
105 /** Creates a context in either AES or DES in ECB mode
106  *  Depends on openssl libcrypto
107  */
108 static int
109 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
110                 enum rte_crypto_cipher_operation direction __rte_unused,
111                 const uint8_t *key, uint16_t key_length, void **ctx)
112 {
113         const EVP_CIPHER *algo = NULL;
114         int ret;
115         *ctx = EVP_CIPHER_CTX_new();
116
117         if (*ctx == NULL) {
118                 ret = -ENOMEM;
119                 goto ctx_init_err;
120         }
121
122         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
123                 algo = EVP_des_ecb();
124         else
125                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
126                         algo = EVP_aes_128_ecb();
127                 else
128                         algo = EVP_aes_256_ecb();
129
130         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
131         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
132                 ret = -EINVAL;
133                 goto ctx_init_err;
134         }
135
136         return 0;
137
138 ctx_init_err:
139         if (*ctx != NULL)
140                 EVP_CIPHER_CTX_free(*ctx);
141         return ret;
142 }
143
144 static int
145 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
146                 struct qat_cryptodev_private *internals)
147 {
148         int i = 0;
149         const struct rte_cryptodev_capabilities *capability;
150
151         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
152                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
153                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
154                         continue;
155
156                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
157                         continue;
158
159                 if (capability->sym.cipher.algo == algo)
160                         return 1;
161         }
162         return 0;
163 }
164
165 static int
166 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
167                 struct qat_cryptodev_private *internals)
168 {
169         int i = 0;
170         const struct rte_cryptodev_capabilities *capability;
171
172         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
173                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
174                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
175                         continue;
176
177                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
178                         continue;
179
180                 if (capability->sym.auth.algo == algo)
181                         return 1;
182         }
183         return 0;
184 }
185
/* Tear down a symmetric session: free the optional DOCSIS BPI OpenSSL
 * context, scrub the private data (it holds key material), detach it
 * from the generic session and return it to its mempool.
 */
void
qat_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		/* Zero before returning to the pool so the next user
		 * cannot observe stale session/key data.
		 */
		memset(s, 0, qat_sym_session_get_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
204
205 static int
206 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
207 {
208         /* Cipher Only */
209         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
210                 return ICP_QAT_FW_LA_CMD_CIPHER;
211
212         /* Authentication Only */
213         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
214                 return ICP_QAT_FW_LA_CMD_AUTH;
215
216         /* AEAD */
217         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
218                 /* AES-GCM and AES-CCM works with different direction
219                  * GCM first encrypts and generate hash where AES-CCM
220                  * first generate hash and encrypts. Similar relation
221                  * applies to decryption.
222                  */
223                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
224                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
225                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
226                         else
227                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
228                 else
229                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
230                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
231                         else
232                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
233         }
234
235         if (xform->next == NULL)
236                 return -1;
237
238         /* Cipher then Authenticate */
239         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
240                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
241                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
242
243         /* Authenticate then Cipher */
244         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
245                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
246                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
247
248         return -1;
249 }
250
251 static struct rte_crypto_auth_xform *
252 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
253 {
254         do {
255                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
256                         return &xform->auth;
257
258                 xform = xform->next;
259         } while (xform);
260
261         return NULL;
262 }
263
264 static struct rte_crypto_cipher_xform *
265 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
266 {
267         do {
268                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
269                         return &xform->cipher;
270
271                 xform = xform->next;
272         } while (xform);
273
274         return NULL;
275 }
276
/* Configure the cipher half of a QAT session from the xform chain.
 *
 * Validates the key size for the selected algorithm, records the IV
 * offset/length, picks the QAT cipher algorithm/mode/direction and
 * builds the cipher content descriptor. For DOCSIS BPI algorithms an
 * OpenSSL ECB context is also created for runt-block processing.
 *
 * @return 0 on success, -EINVAL on bad parameters, -ENOTSUP for
 *         algorithms this device/PMD does not support. Any allocated
 *         BPI context is freed on the error path.
 */
int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
{
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	enum qat_device_gen qat_dev_gen =
				internals->qat_dev->qat_dev_gen;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		/* GEN4 devices need the UCS flag set for CTR mode */
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		/* DOCSIS BPI needs an OpenSSL ECB context for runt blocks */
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		/* ZUC is capability-gated: not all QAT generations have it */
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		/* XTS keys are two concatenated AES keys; validate one half */
		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
			QAT_LOG(ERR, "AES-XTS-192 not supported");
			ret = -EINVAL;
			goto error_out;
		}
		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	/* Build the cipher portion of the content descriptor */
	if (qat_sym_cd_cipher_set(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	/* A BPI ctx may have been created before a later check failed */
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}
469
470 int
471 qat_sym_session_configure(struct rte_cryptodev *dev,
472                 struct rte_crypto_sym_xform *xform,
473                 struct rte_cryptodev_sym_session *sess,
474                 struct rte_mempool *mempool)
475 {
476         void *sess_private_data;
477         int ret;
478
479         if (rte_mempool_get(mempool, &sess_private_data)) {
480                 CDEV_LOG_ERR(
481                         "Couldn't get object from session mempool");
482                 return -ENOMEM;
483         }
484
485 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
486         OSSL_PROVIDER *legacy;
487         OSSL_PROVIDER *deflt;
488
489         /* Load Multiple providers into the default (NULL) library context */
490         legacy = OSSL_PROVIDER_load(NULL, "legacy");
491         if (legacy == NULL)
492                 return -EINVAL;
493
494         deflt = OSSL_PROVIDER_load(NULL, "default");
495         if (deflt == NULL) {
496                 OSSL_PROVIDER_unload(legacy);
497                 return  -EINVAL;
498         }
499 #endif
500         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
501         if (ret != 0) {
502                 QAT_LOG(ERR,
503                     "Crypto QAT PMD: failed to configure session parameters");
504
505                 /* Return session to mempool */
506                 rte_mempool_put(mempool, sess_private_data);
507                 return ret;
508         }
509
510         set_sym_session_private_data(sess, dev->driver_id,
511                 sess_private_data);
512
513 # if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
514         OSSL_PROVIDER_unload(legacy);
515         OSSL_PROVIDER_unload(deflt);
516 # endif
517         return 0;
518 }
519
/* Fill in a QAT session from an xform chain: verify the session memory
 * is IOVA-addressable, derive the firmware command id, dispatch to the
 * cipher/auth/aead configure helpers in the order the command implies,
 * then finalize the request header and run the generation-specific
 * set_session hook.
 *
 * @return 0 on success, -EINVAL for bad memory/parameters, -ENOTSUP
 *         for unsupported chains, or an error from the helpers.
 */
int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_sym_session *session = session_private;
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
	int ret;
	int qat_cmd_id;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR,
			"Session physical address unknown. Bad memory pool.");
		return -EINVAL;
	}

	memset(session, 0, sizeof(*session));
	/* Set context descriptor physical address */
	session->cd_paddr = session_paddr +
			offsetof(struct qat_sym_session, cd);

	session->dev_id = internals->dev_id;
	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
	session->is_ucs = 0;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		if (ret < 0)
			return ret;
		/* GEN3 AES-GMAC with a 12B IV can use single-pass mode */
		session->is_single_pass_gmac =
			       qat_dev_gen == QAT_GEN3 &&
			       xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
			       xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			/* Chained xforms: configure cipher, then auth */
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			/* Chained xforms: configure auth, then cipher */
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	/* Remaining firmware services are not exposed by this PMD */
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	default:
	QAT_LOG(ERR, "Unsupported Service %u",
		session->qat_cmd);
		return -ENOTSUP;
	}
	qat_sym_session_finalize(session);

	/* Generation-specific final session setup */
	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
			(void *)session);
}
625
626 static int
627 qat_sym_session_handle_single_pass(struct qat_sym_session *session,
628                 const struct rte_crypto_aead_xform *aead_xform)
629 {
630         session->is_single_pass = 1;
631         session->is_auth = 1;
632         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
633         /* Chacha-Poly is special case that use QAT CTR mode */
634         if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
635                 session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
636         else
637                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
638
639         session->cipher_iv.offset = aead_xform->iv.offset;
640         session->cipher_iv.length = aead_xform->iv.length;
641         session->aad_len = aead_xform->aad_length;
642         session->digest_length = aead_xform->digest_length;
643
644         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
645                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
646                 session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
647         } else {
648                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
649                 session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
650         }
651
652         return 0;
653 }
654
655 int
656 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
657                                 struct rte_crypto_sym_xform *xform,
658                                 struct qat_sym_session *session)
659 {
660         struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
661         struct qat_cryptodev_private *internals = dev->data->dev_private;
662         const uint8_t *key_data = auth_xform->key.data;
663         uint8_t key_length = auth_xform->key.length;
664         enum qat_device_gen qat_dev_gen =
665                         internals->qat_dev->qat_dev_gen;
666
667         session->aes_cmac = 0;
668         session->auth_key_length = auth_xform->key.length;
669         session->auth_iv.offset = auth_xform->iv.offset;
670         session->auth_iv.length = auth_xform->iv.length;
671         session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
672         session->is_auth = 1;
673         session->digest_length = auth_xform->digest_length;
674
675         switch (auth_xform->algo) {
676         case RTE_CRYPTO_AUTH_SHA1:
677                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
678                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
679                 break;
680         case RTE_CRYPTO_AUTH_SHA224:
681                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
682                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
683                 break;
684         case RTE_CRYPTO_AUTH_SHA256:
685                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
686                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
687                 break;
688         case RTE_CRYPTO_AUTH_SHA384:
689                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
690                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
691                 break;
692         case RTE_CRYPTO_AUTH_SHA512:
693                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
694                 session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
695                 break;
696         case RTE_CRYPTO_AUTH_SHA1_HMAC:
697                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
698                 break;
699         case RTE_CRYPTO_AUTH_SHA224_HMAC:
700                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
701                 break;
702         case RTE_CRYPTO_AUTH_SHA256_HMAC:
703                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
704                 break;
705         case RTE_CRYPTO_AUTH_SHA384_HMAC:
706                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
707                 break;
708         case RTE_CRYPTO_AUTH_SHA512_HMAC:
709                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
710                 break;
711         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
712                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
713                 break;
714         case RTE_CRYPTO_AUTH_AES_CMAC:
715                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
716                 session->aes_cmac = 1;
717                 break;
718         case RTE_CRYPTO_AUTH_AES_GMAC:
719                 if (qat_sym_validate_aes_key(auth_xform->key.length,
720                                 &session->qat_cipher_alg) != 0) {
721                         QAT_LOG(ERR, "Invalid AES key size");
722                         return -EINVAL;
723                 }
724                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
725                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
726                 if (session->auth_iv.length == 0)
727                         session->auth_iv.length = AES_GCM_J0_LEN;
728                 else
729                         session->is_iv12B = 1;
730                 if (qat_dev_gen == QAT_GEN4) {
731                         session->is_cnt_zero = 1;
732                         session->is_ucs = 1;
733                 }
734                 break;
735         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
736                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
737                 break;
738         case RTE_CRYPTO_AUTH_MD5_HMAC:
739                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
740                 break;
741         case RTE_CRYPTO_AUTH_NULL:
742                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
743                 break;
744         case RTE_CRYPTO_AUTH_KASUMI_F9:
745                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
746                 break;
747         case RTE_CRYPTO_AUTH_ZUC_EIA3:
748                 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
749                         QAT_LOG(ERR, "%s not supported on this device",
750                                 rte_crypto_auth_algorithm_strings
751                                 [auth_xform->algo]);
752                         return -ENOTSUP;
753                 }
754                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
755                 break;
756         case RTE_CRYPTO_AUTH_MD5:
757         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
758                 QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
759                                 auth_xform->algo);
760                 return -ENOTSUP;
761         default:
762                 QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
763                                 auth_xform->algo);
764                 return -EINVAL;
765         }
766
767         if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
768                 session->is_gmac = 1;
769                 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
770                         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
771                         session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
772                         /*
773                          * It needs to create cipher desc content first,
774                          * then authentication
775                          */
776                         if (qat_sym_cd_cipher_set(session,
777                                                 auth_xform->key.data,
778                                                 auth_xform->key.length))
779                                 return -EINVAL;
780
781                         if (qat_sym_cd_auth_set(session,
782                                                 key_data,
783                                                 key_length,
784                                                 0,
785                                                 auth_xform->digest_length,
786                                                 auth_xform->op))
787                                 return -EINVAL;
788                 } else {
789                         session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
790                         session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
791                         /*
792                          * It needs to create authentication desc content first,
793                          * then cipher
794                          */
795
796                         if (qat_sym_cd_auth_set(session,
797                                         key_data,
798                                         key_length,
799                                         0,
800                                         auth_xform->digest_length,
801                                         auth_xform->op))
802                                 return -EINVAL;
803
804                         if (qat_sym_cd_cipher_set(session,
805                                                 auth_xform->key.data,
806                                                 auth_xform->key.length))
807                                 return -EINVAL;
808                 }
809         } else {
810                 if (qat_sym_cd_auth_set(session,
811                                 key_data,
812                                 key_length,
813                                 0,
814                                 auth_xform->digest_length,
815                                 auth_xform->op))
816                         return -EINVAL;
817         }
818
819         return 0;
820 }
821
/**
 * Populate a QAT session from an AEAD transform (AES-GCM, AES-CCM or
 * CHACHA20-POLY1305) and build the hardware content descriptors.
 *
 * @param dev		cryptodev owning the session; used to read the
 *			QAT device generation
 * @param xform		AEAD transform supplied by the application
 * @param session	QAT session to populate
 * @return 0 on success, -EINVAL on invalid key/IV sizes or on content
 *	descriptor setup failure
 */
int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_cryptodev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
	session->is_auth = 1;
	session->digest_length = aead_xform->digest_length;

	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;

		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		if (session->cipher_iv.length == 0) {
			/* No IV supplied: fall back to the full J0 block */
			session->cipher_iv.length = AES_GCM_J0_LEN;
			break;
		}
		session->is_iv12B = 1;	/* standard 12-byte GCM IV */
		if (qat_dev_gen < QAT_GEN3)
			break;
		/* GEN3+ only: may switch the session to single-pass AEAD */
		qat_sym_session_handle_single_pass(session,
				aead_xform);
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		break;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
			return -EINVAL;
		if (qat_dev_gen == QAT_GEN4)
			session->is_ucs = 1;
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		/* Chacha20-Poly1305 is always handled as single-pass */
		qat_sym_session_handle_single_pass(session,
						aead_xform);
		break;
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	if (session->is_single_pass) {
		/* Single-pass: only a cipher descriptor is needed */
		if (qat_sym_cd_cipher_set(session,
				aead_xform->key.data, aead_xform->key.length))
			return -EINVAL;
	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * It needs to create cipher desc content first,
		 * then authentication
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * It needs to create authentication desc content first,
		 * then cipher
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_cd_auth_set(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_cd_cipher_set(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	return 0;
}
949
950 unsigned int qat_sym_session_get_private_size(
951                 struct rte_cryptodev *dev __rte_unused)
952 {
953         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
954 }
955
956 /* returns block size in bytes per cipher algo */
957 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
958 {
959         switch (qat_cipher_alg) {
960         case ICP_QAT_HW_CIPHER_ALGO_DES:
961                 return ICP_QAT_HW_DES_BLK_SZ;
962         case ICP_QAT_HW_CIPHER_ALGO_3DES:
963                 return ICP_QAT_HW_3DES_BLK_SZ;
964         case ICP_QAT_HW_CIPHER_ALGO_AES128:
965         case ICP_QAT_HW_CIPHER_ALGO_AES192:
966         case ICP_QAT_HW_CIPHER_ALGO_AES256:
967                 return ICP_QAT_HW_AES_BLK_SZ;
968         default:
969                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
970                 return -EFAULT;
971         };
972         return -EFAULT;
973 }
974
975 /*
976  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
977  * This is digest size rounded up to nearest quadword
978  */
979 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
980 {
981         switch (qat_hash_alg) {
982         case ICP_QAT_HW_AUTH_ALGO_SHA1:
983                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
984                                                 QAT_HW_DEFAULT_ALIGNMENT);
985         case ICP_QAT_HW_AUTH_ALGO_SHA224:
986                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
987                                                 QAT_HW_DEFAULT_ALIGNMENT);
988         case ICP_QAT_HW_AUTH_ALGO_SHA256:
989                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
990                                                 QAT_HW_DEFAULT_ALIGNMENT);
991         case ICP_QAT_HW_AUTH_ALGO_SHA384:
992                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
993                                                 QAT_HW_DEFAULT_ALIGNMENT);
994         case ICP_QAT_HW_AUTH_ALGO_SHA512:
995                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
996                                                 QAT_HW_DEFAULT_ALIGNMENT);
997         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
998                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
999                                                 QAT_HW_DEFAULT_ALIGNMENT);
1000         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1001         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1002                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1003                                                 QAT_HW_DEFAULT_ALIGNMENT);
1004         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1005                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1006                                                 QAT_HW_DEFAULT_ALIGNMENT);
1007         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1008                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1009                                                 QAT_HW_DEFAULT_ALIGNMENT);
1010         case ICP_QAT_HW_AUTH_ALGO_MD5:
1011                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1012                                                 QAT_HW_DEFAULT_ALIGNMENT);
1013         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1014                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1015                                                 QAT_HW_DEFAULT_ALIGNMENT);
1016         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1017                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1018                                                 QAT_HW_DEFAULT_ALIGNMENT);
1019         case ICP_QAT_HW_AUTH_ALGO_NULL:
1020                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1021                                                 QAT_HW_DEFAULT_ALIGNMENT);
1022         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1023                 /* return maximum state1 size in this case */
1024                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1025                                                 QAT_HW_DEFAULT_ALIGNMENT);
1026         default:
1027                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1028                 return -EFAULT;
1029         };
1030         return -EFAULT;
1031 }
1032
1033 /* returns digest size in bytes  per hash algo */
1034 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1035 {
1036         switch (qat_hash_alg) {
1037         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1038                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1039         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1040                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1041         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1042                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1043         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1044                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1045         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1046                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1047         case ICP_QAT_HW_AUTH_ALGO_MD5:
1048                 return ICP_QAT_HW_MD5_STATE1_SZ;
1049         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1050                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1051         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1052                 /* return maximum digest size in this case */
1053                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1054         default:
1055                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1056                 return -EFAULT;
1057         };
1058         return -EFAULT;
1059 }
1060
1061 /* returns block size in byes per hash algo */
1062 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1063 {
1064         switch (qat_hash_alg) {
1065         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1066                 return SHA_CBLOCK;
1067         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1068                 return SHA256_CBLOCK;
1069         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1070                 return SHA256_CBLOCK;
1071         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1072                 return SHA512_CBLOCK;
1073         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1074                 return SHA512_CBLOCK;
1075         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1076                 return 16;
1077         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1078                 return ICP_QAT_HW_AES_BLK_SZ;
1079         case ICP_QAT_HW_AUTH_ALGO_MD5:
1080                 return MD5_CBLOCK;
1081         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1082                 /* return maximum block size in this case */
1083                 return SHA512_CBLOCK;
1084         default:
1085                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1086                 return -EFAULT;
1087         };
1088         return -EFAULT;
1089 }
1090
1091 #define HMAC_IPAD_VALUE 0x36
1092 #define HMAC_OPAD_VALUE 0x5c
1093 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1094
1095 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1096
1097 #ifdef RTE_QAT_LIBIPSECMB
/*
 * AES-ECB encrypt the single 16-byte block at @in into @out using @key,
 * via an intel-ipsec-mb job on manager @m.
 *
 * @param in		source block (16 bytes)
 * @param out		destination block (16 bytes)
 * @param m		initialized IMB manager
 * @param key		raw AES key
 * @param auth_keylen	key length; must be 16, 24 or 32 bytes
 * @return 0 on success, -EFAULT on bad key length or job failure
 */
static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
		const uint8_t *key, uint16_t auth_keylen)
{
	int err;
	struct IMB_JOB *job;
	/* Expanded encrypt keys; 'dust' takes the unused decrypt schedule */
	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
	DECLARE_ALIGNED(uint32_t dust[4*15], 16);

	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
		IMB_AES_KEYEXP_128(m, key, expkey, dust);
	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
		IMB_AES_KEYEXP_192(m, key, expkey, dust);
	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
		IMB_AES_KEYEXP_256(m, key, expkey, dust);
	else
		return -EFAULT;

	job = IMB_GET_NEXT_JOB(m);

	job->src = in;
	job->dst = out;
	job->enc_keys = expkey;
	job->key_len_in_bytes = auth_keylen;
	job->msg_len_to_cipher_in_bytes = 16;
	job->iv_len_in_bytes = 0;
	job->cipher_direction = IMB_DIR_ENCRYPT;
	job->cipher_mode = IMB_CIPHER_ECB;
	job->hash_alg = IMB_AUTH_NULL;

	/* Drain any jobs already queued on this manager before submitting */
	while (IMB_FLUSH_JOB(m) != NULL)
		;

	/*
	 * NOTE(review): assumes the submitted job completes synchronously;
	 * a NULL return from IMB_SUBMIT_JOB() (job still queued) is treated
	 * as failure -- confirm against intel-ipsec-mb job API semantics.
	 */
	job = IMB_SUBMIT_JOB(m);
	if (job) {
		if (job->status == IMB_STATUS_COMPLETED)
			return 0;
	}

	err = imb_get_errno(m);
	if (err)
		QAT_LOG(ERR, "Error: %s!\n", imb_get_strerror(err));

	return -EFAULT;
}
1142
1143 static int
1144 partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
1145                 uint8_t *data_in, uint8_t *data_out, IMB_MGR *m)
1146 {
1147         int digest_size;
1148         uint8_t digest[qat_hash_get_digest_size(
1149                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1150         uint32_t *hash_state_out_be32;
1151         uint64_t *hash_state_out_be64;
1152         int i;
1153
1154         /* Initialize to avoid gcc warning */
1155         memset(digest, 0, sizeof(digest));
1156
1157         digest_size = qat_hash_get_digest_size(hash_alg);
1158         if (digest_size <= 0)
1159                 return -EFAULT;
1160
1161         hash_state_out_be32 = (uint32_t *)data_out;
1162         hash_state_out_be64 = (uint64_t *)data_out;
1163
1164         switch (hash_alg) {
1165         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1166                 IMB_SHA1_ONE_BLOCK(m, data_in, digest);
1167                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1168                         *hash_state_out_be32 =
1169                                 rte_bswap32(*(((uint32_t *)digest)+i));
1170                 break;
1171         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1172                 IMB_SHA224_ONE_BLOCK(m, data_in, digest);
1173                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1174                         *hash_state_out_be32 =
1175                                 rte_bswap32(*(((uint32_t *)digest)+i));
1176                 break;
1177         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1178                 IMB_SHA256_ONE_BLOCK(m, data_in, digest);
1179                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1180                         *hash_state_out_be32 =
1181                                 rte_bswap32(*(((uint32_t *)digest)+i));
1182                 break;
1183         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1184                 IMB_SHA384_ONE_BLOCK(m, data_in, digest);
1185                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1186                         *hash_state_out_be64 =
1187                                 rte_bswap64(*(((uint64_t *)digest)+i));
1188                 break;
1189         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1190                 IMB_SHA512_ONE_BLOCK(m, data_in, digest);
1191                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1192                         *hash_state_out_be64 =
1193                                 rte_bswap64(*(((uint64_t *)digest)+i));
1194                 break;
1195         case ICP_QAT_HW_AUTH_ALGO_MD5:
1196                 IMB_MD5_ONE_BLOCK(m, data_in, data_out);
1197                 break;
1198         default:
1199                 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1200                 return -EFAULT;
1201         }
1202
1203         return 0;
1204 }
1205
/*
 * Precompute the QAT auth state for @hash_alg with intel-ipsec-mb.
 *
 * - AES-XCBC: derive the three XCBC keys into @p_state_buf;
 *   with @aes_cmac set, store the key plus CMAC subkeys K1/K2 instead.
 * - GCM/GMAC (GALOIS): compute the hash key H = E(K, 0^128).
 * - HMAC algorithms: store the partial hashes of the ipad block and,
 *   state1-size bytes later, of the opad block.
 *
 * @p_state_len is set to the number of state bytes written.
 * Returns 0 on success, -ENOMEM if no IMB manager could be allocated,
 * or a negative error code otherwise.
 */
static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len,
				uint8_t aes_cmac)
{
	int block_size = 0;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i, ret = 0;
	uint8_t in[ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ];

	/* Per-call IMB manager; released on every exit path via 'out' */
	IMB_MGR *m;
	m = alloc_mb_mgr(0);
	if (m == NULL)
		return -ENOMEM;

	init_mb_mgr_auto(m, NULL);
	memset(in, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {

		/* CMAC */
		if (aes_cmac) {
			uint8_t *k1, *k2;
			/* CMAC state is always built from an AES-128 key */
			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
			rte_memcpy(p_state_buf, auth_key, auth_keylen);

			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
			/* Subkeys K1/K2 follow the key in the state buffer */
			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
			goto out;
		}

		/* RFC 3566 constants used to derive the three XCBC keys */
		static uint8_t qat_aes_xcbc_key_seed[
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *input = in;
		uint8_t *out = p_state_buf;
		rte_memcpy(input, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		/* Encrypt each 16-byte seed block to derive one XCBC key */
		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
			if (aes_ipsecmb_job(input, out, m, auth_key, auth_keylen)) {
				/* Wipe the whole seed copy from the stack */
				memset(input -
				   (i * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
				  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				ret = -EFAULT;
				goto out;
			}

			input += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		goto out;

	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *out = p_state_buf;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		/* H = AES-ECB(key, all-zero block); 'in' is still zeroed */
		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
			ret = -EFAULT;
			goto out;
		}

		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		goto out;
	}

	/* Remaining algorithms are HMAC based */
	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size < 0) {
		free_mb_mgr(m);
		return block_size;
	}

	if (auth_keylen > (unsigned int)block_size) {
		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
		ret = -EFAULT;
		goto out;
	}
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf, m)) {
		QAT_LOG(ERR, "ipad precompute failed");
		ret = -EFAULT;
		goto out;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute_ipsec_mb(hash_alg, opad,
				p_state_buf + *p_state_len, m)) {
		QAT_LOG(ERR, "opad precompute failed");
		ret = -EFAULT;
		goto out;
	}

out:
	/*  don't leave data lying around */
	/* block_size is 0 on the non-HMAC paths, so these are no-ops there */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	free_mb_mgr(m);
	return ret;
}
1342 #endif
1343 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1344 {
1345         SHA_CTX ctx;
1346
1347         if (!SHA1_Init(&ctx))
1348                 return -EFAULT;
1349         SHA1_Transform(&ctx, data_in);
1350         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1351         return 0;
1352 }
1353
1354 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1355 {
1356         SHA256_CTX ctx;
1357
1358         if (!SHA224_Init(&ctx))
1359                 return -EFAULT;
1360         SHA256_Transform(&ctx, data_in);
1361         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1362         return 0;
1363 }
1364
1365 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1366 {
1367         SHA256_CTX ctx;
1368
1369         if (!SHA256_Init(&ctx))
1370                 return -EFAULT;
1371         SHA256_Transform(&ctx, data_in);
1372         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1373         return 0;
1374 }
1375
1376 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1377 {
1378         SHA512_CTX ctx;
1379
1380         if (!SHA384_Init(&ctx))
1381                 return -EFAULT;
1382         SHA512_Transform(&ctx, data_in);
1383         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1384         return 0;
1385 }
1386
1387 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1388 {
1389         SHA512_CTX ctx;
1390
1391         if (!SHA512_Init(&ctx))
1392                 return -EFAULT;
1393         SHA512_Transform(&ctx, data_in);
1394         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1395         return 0;
1396 }
1397
1398 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1399 {
1400         MD5_CTX ctx;
1401
1402         if (!MD5_Init(&ctx))
1403                 return -EFAULT;
1404         MD5_Transform(&ctx, data_in);
1405         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1406
1407         return 0;
1408 }
1409
1410 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1411 {
1412         int i;
1413
1414         derived[0] = base[0] << 1;
1415         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1416                 derived[i] = base[i] << 1;
1417                 derived[i - 1] |= base[i] >> 7;
1418         }
1419
1420         if (base[0] & 0x80)
1421                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1422 }
1423
1424 static int
1425 partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1426                 uint8_t *data_in, uint8_t *data_out)
1427 {
1428         int digest_size;
1429         uint8_t digest[qat_hash_get_digest_size(
1430                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1431         uint32_t *hash_state_out_be32;
1432         uint64_t *hash_state_out_be64;
1433         int i;
1434
1435         /* Initialize to avoid gcc warning */
1436         memset(digest, 0, sizeof(digest));
1437
1438         digest_size = qat_hash_get_digest_size(hash_alg);
1439         if (digest_size <= 0)
1440                 return -EFAULT;
1441
1442         hash_state_out_be32 = (uint32_t *)data_out;
1443         hash_state_out_be64 = (uint64_t *)data_out;
1444
1445         switch (hash_alg) {
1446         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1447                 if (partial_hash_sha1(data_in, digest))
1448                         return -EFAULT;
1449                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1450                         *hash_state_out_be32 =
1451                                 rte_bswap32(*(((uint32_t *)digest)+i));
1452                 break;
1453         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1454                 if (partial_hash_sha224(data_in, digest))
1455                         return -EFAULT;
1456                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1457                         *hash_state_out_be32 =
1458                                 rte_bswap32(*(((uint32_t *)digest)+i));
1459                 break;
1460         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1461                 if (partial_hash_sha256(data_in, digest))
1462                         return -EFAULT;
1463                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1464                         *hash_state_out_be32 =
1465                                 rte_bswap32(*(((uint32_t *)digest)+i));
1466                 break;
1467         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1468                 if (partial_hash_sha384(data_in, digest))
1469                         return -EFAULT;
1470                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1471                         *hash_state_out_be64 =
1472                                 rte_bswap64(*(((uint64_t *)digest)+i));
1473                 break;
1474         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1475                 if (partial_hash_sha512(data_in, digest))
1476                         return -EFAULT;
1477                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1478                         *hash_state_out_be64 =
1479                                 rte_bswap64(*(((uint64_t *)digest)+i));
1480                 break;
1481         case ICP_QAT_HW_AUTH_ALGO_MD5:
1482                 if (partial_hash_md5(data_in, data_out))
1483                         return -EFAULT;
1484                 break;
1485         default:
1486                 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1487                 return -EFAULT;
1488         }
1489
1490         return 0;
1491 }
1492
1493 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1494                                 const uint8_t *auth_key,
1495                                 uint16_t auth_keylen,
1496                                 uint8_t *p_state_buf,
1497                                 uint16_t *p_state_len,
1498                                 uint8_t aes_cmac)
1499 {
1500         int block_size;
1501         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1502         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1503         int i;
1504
1505         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1506
1507                 /* CMAC */
1508                 if (aes_cmac) {
1509                         AES_KEY enc_key;
1510                         uint8_t *in = NULL;
1511                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1512                         uint8_t *k1, *k2;
1513
1514                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1515
1516                         in = rte_zmalloc("AES CMAC K1",
1517                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1518
1519                         if (in == NULL) {
1520                                 QAT_LOG(ERR, "Failed to alloc memory");
1521                                 return -ENOMEM;
1522                         }
1523
1524                         rte_memcpy(in, AES_CMAC_SEED,
1525                                    ICP_QAT_HW_AES_128_KEY_SZ);
1526                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1527
1528                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1529                                 &enc_key) != 0) {
1530                                 rte_free(in);
1531                                 return -EFAULT;
1532                         }
1533
1534                         AES_encrypt(in, k0, &enc_key);
1535
1536                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1537                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1538
1539                         aes_cmac_key_derive(k0, k1);
1540                         aes_cmac_key_derive(k1, k2);
1541
1542                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1543                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1544                         rte_free(in);
1545                         return 0;
1546                 } else {
1547                         static uint8_t qat_aes_xcbc_key_seed[
1548                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1549                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1550                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1551                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1552                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1553                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1554                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1555                         };
1556
1557                         uint8_t *in = NULL;
1558                         uint8_t *out = p_state_buf;
1559                         int x;
1560                         AES_KEY enc_key;
1561
1562                         in = rte_zmalloc("working mem for key",
1563                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1564                         if (in == NULL) {
1565                                 QAT_LOG(ERR, "Failed to alloc memory");
1566                                 return -ENOMEM;
1567                         }
1568
1569                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1570                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1571                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1572                                 if (AES_set_encrypt_key(auth_key,
1573                                                         auth_keylen << 3,
1574                                                         &enc_key) != 0) {
1575                                         rte_free(in -
1576                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1577                                         memset(out -
1578                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1579                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1580                                         return -EFAULT;
1581                                 }
1582                                 AES_encrypt(in, out, &enc_key);
1583                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1584                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1585                         }
1586                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1587                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1588                         return 0;
1589                 }
1590
1591         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1592                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1593                 uint8_t *in = NULL;
1594                 uint8_t *out = p_state_buf;
1595                 AES_KEY enc_key;
1596
1597                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1598                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1599                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1600                 in = rte_zmalloc("working mem for key",
1601                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1602                 if (in == NULL) {
1603                         QAT_LOG(ERR, "Failed to alloc memory");
1604                         return -ENOMEM;
1605                 }
1606
1607                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1608                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1609                         &enc_key) != 0) {
1610                         return -EFAULT;
1611                 }
1612                 AES_encrypt(in, out, &enc_key);
1613                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1614                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1615                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1616                 rte_free(in);
1617                 return 0;
1618         }
1619
1620         block_size = qat_hash_get_block_size(hash_alg);
1621         if (block_size < 0)
1622                 return block_size;
1623         /* init ipad and opad from key and xor with fixed values */
1624         memset(ipad, 0, block_size);
1625         memset(opad, 0, block_size);
1626
1627         if (auth_keylen > (unsigned int)block_size) {
1628                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1629                 return -EFAULT;
1630         }
1631         rte_memcpy(ipad, auth_key, auth_keylen);
1632         rte_memcpy(opad, auth_key, auth_keylen);
1633
1634         for (i = 0; i < block_size; i++) {
1635                 uint8_t *ipad_ptr = ipad + i;
1636                 uint8_t *opad_ptr = opad + i;
1637                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1638                 *opad_ptr ^= HMAC_OPAD_VALUE;
1639         }
1640
1641         /* do partial hash of ipad and copy to state1 */
1642         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1643                 memset(ipad, 0, block_size);
1644                 memset(opad, 0, block_size);
1645                 QAT_LOG(ERR, "ipad precompute failed");
1646                 return -EFAULT;
1647         }
1648
1649         /*
1650          * State len is a multiple of 8, so may be larger than the digest.
1651          * Put the partial hash of opad state_len bytes after state1
1652          */
1653         *p_state_len = qat_hash_get_state1_size(hash_alg);
1654         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1655                 memset(ipad, 0, block_size);
1656                 memset(opad, 0, block_size);
1657                 QAT_LOG(ERR, "opad precompute failed");
1658                 return -EFAULT;
1659         }
1660
1661         /*  don't leave data lying around */
1662         memset(ipad, 0, block_size);
1663         memset(opad, 0, block_size);
1664         return 0;
1665 }
1666
1667 static void
1668 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
1669 {
1670         struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
1671         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1672         enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
1673         uint32_t slice_flags = session->slice_types;
1674
1675         header->hdr_flags =
1676                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1677         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1678         header->service_cmd_id = session->qat_cmd;
1679         header->comn_req_flags =
1680                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1681                                         QAT_COMN_PTR_TYPE_FLAT);
1682         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1683                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1684         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1685                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1686
1687         switch (proto_flags)            {
1688         case QAT_CRYPTO_PROTO_FLAG_NONE:
1689                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1690                                         ICP_QAT_FW_LA_NO_PROTO);
1691                 break;
1692         case QAT_CRYPTO_PROTO_FLAG_CCM:
1693                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1694                                         ICP_QAT_FW_LA_CCM_PROTO);
1695                 break;
1696         case QAT_CRYPTO_PROTO_FLAG_GCM:
1697                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1698                                         ICP_QAT_FW_LA_GCM_PROTO);
1699                 break;
1700         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1701                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1702                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1703                 break;
1704         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1705                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1706                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1707                 break;
1708         }
1709
1710         /* More than one of the following flags can be set at once */
1711         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
1712                 ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
1713                         header->serv_specif_flags,
1714                         ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
1715         }
1716         if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
1717                 ICP_QAT_FW_LA_SLICE_TYPE_SET(
1718                         header->serv_specif_flags,
1719                         ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
1720         }
1721
1722         if (session->is_auth) {
1723                 if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
1724                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1725                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1726                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1727                                         ICP_QAT_FW_LA_CMP_AUTH_RES);
1728                 } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
1729                         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1730                                                 ICP_QAT_FW_LA_RET_AUTH_RES);
1731                         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1732                                                 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1733                 }
1734         } else {
1735                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1736                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1737                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1738                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1739         }
1740
1741         if (session->is_iv12B) {
1742                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1743                         header->serv_specif_flags,
1744                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1745         }
1746
1747         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1748                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1749         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1750                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1751 }
1752
/*
 * Fill in the cipher portion of the session's content descriptor and
 * request template from the supplied key.
 *
 * Sets the slice chain ids for the command type, picks the cipher
 * direction/key-convert mode, writes the (possibly transformed or
 * padded) key into the descriptor, and records the resulting sizes in
 * the cd control header.
 *
 * Returns 0 on success, -EFAULT if cdesc->qat_cmd is not a cipher
 * command.
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cipher and hash cd ctrl headers are overlapping views of cd_ctrl */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	/* req_ucs/req_cipher alias the same request-parameter area */
	struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
			(struct icp_qat_fw_la_cipher_20_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	struct icp_qat_fw_la_cipher_req_params *req_cipher =
			(struct icp_qat_fw_la_cipher_req_params *)
			&cdesc->fw_req.serv_specif_rqpars;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	/* Configure the slice chain according to the command type */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* HASH_CIPHER keeps the cd_cur_ptr set by the auth setup */
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set.
		 * Chacha20-Poly1305 is special case, CTR but single-pass
		 * so both direction need to be used.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		if (cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
			cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
				cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		}
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizing (key size may exceed keylen,
	 * in which case the key area is padded further below).
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	}
	/* Offset of the cipher config within the cd, in quad-words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	/* cipher/cipher20 are two layouts of the same cd location */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI stores the key twice: plain and F8-modified */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		const uint8_t *final_key = cipherkey;

		/* UCS slice: gen-2.0 config block, key rounded up to 128b */
		cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (cdesc->is_single_pass) {
		QAT_FIELD_SET(cipher->cipher_config.val,
			cdesc->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		/* UCS and SPC 1.8/2.0 share configuration of 2nd config word */
		cdesc->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
					cdesc->aad_len);
		cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
	}

	/* Pad the key area up to total_key_size; 3DES short-key forms
	 * replicate K1 into the missing key slots instead of zero-fill.
	 */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	if (cdesc->is_ucs) {
		/*
		 * These values match in terms of position auth
		 * slice request fields
		 */
		req_ucs->spc_auth_res_sz = cdesc->digest_length;
		if (!cdesc->is_gmac) {
			req_ucs->spc_aad_sz = cdesc->aad_len;
			req_ucs->spc_aad_offset = 0;
		}
	} else if (cdesc->is_single_pass) {
		req_cipher->spc_aad_sz = cdesc->aad_len;
		req_cipher->spc_auth_res_sz = cdesc->digest_length;
	}
	/* Record final descriptor and key sizes (quad-word units) */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1952
1953 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1954                                                 const uint8_t *authkey,
1955                                                 uint32_t authkeylen,
1956                                                 uint32_t aad_length,
1957                                                 uint32_t digestsize,
1958                                                 unsigned int operation)
1959 {
1960         struct icp_qat_hw_auth_setup *hash;
1961         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1962         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1963         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1964         void *ptr = &req_tmpl->cd_ctrl;
1965         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1966         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1967         struct icp_qat_fw_la_auth_req_params *auth_param =
1968                 (struct icp_qat_fw_la_auth_req_params *)
1969                 ((char *)&req_tmpl->serv_specif_rqpars +
1970                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1971         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1972         uint16_t hash_offset, cd_size;
1973         uint32_t *aad_len = NULL;
1974         uint32_t wordIndex  = 0;
1975         uint32_t *pTempKey;
1976         int ret = 0;
1977
1978         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1979                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1980                                         ICP_QAT_FW_SLICE_AUTH);
1981                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1982                                         ICP_QAT_FW_SLICE_DRAM_WR);
1983                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1984         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1985                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1986                                 ICP_QAT_FW_SLICE_AUTH);
1987                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1988                                 ICP_QAT_FW_SLICE_CIPHER);
1989                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1990                                 ICP_QAT_FW_SLICE_CIPHER);
1991                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1992                                 ICP_QAT_FW_SLICE_DRAM_WR);
1993                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1994         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1995                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1996                 return -EFAULT;
1997         }
1998
1999         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
2000                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
2001         else
2002                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
2003
2004         /*
2005          * Setup the inner hash config
2006          */
2007         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
2008         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
2009         hash->auth_config.reserved = 0;
2010         hash->auth_config.config =
2011                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
2012                                 cdesc->qat_hash_alg, digestsize);
2013
2014         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
2015                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
2016                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
2017                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
2018                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
2019                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
2020                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
2021                 || cdesc->is_cnt_zero
2022                         )
2023                 hash->auth_counter.counter = 0;
2024         else {
2025                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
2026
2027                 if (block_size < 0)
2028                         return block_size;
2029                 hash->auth_counter.counter = rte_bswap32(block_size);
2030         }
2031
2032         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
2033
2034         /*
2035          * cd_cur_ptr now points at the state1 information.
2036          */
2037         switch (cdesc->qat_hash_alg) {
2038         case ICP_QAT_HW_AUTH_ALGO_SHA1:
2039                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2040                         /* Plain SHA-1 */
2041                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
2042                                         sizeof(sha1InitialState));
2043                         state1_size = qat_hash_get_state1_size(
2044                                         cdesc->qat_hash_alg);
2045                         break;
2046                 }
2047                 /* SHA-1 HMAC */
2048                 if (qat_ipsec_mb_lib) {
2049 #ifdef RTE_QAT_LIBIPSECMB
2050                         ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
2051                                 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2052                                 cdesc->aes_cmac);
2053 #else
2054                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
2055                         return -EFAULT;
2056 #endif
2057                 } else {
2058                         ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
2059                                 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2060                                 cdesc->aes_cmac);
2061                 }
2062
2063                 if (ret) {
2064                         QAT_LOG(ERR, "(SHA)precompute failed");
2065                         return -EFAULT;
2066                 }
2067                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
2068                 break;
2069         case ICP_QAT_HW_AUTH_ALGO_SHA224:
2070                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2071                         /* Plain SHA-224 */
2072                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
2073                                         sizeof(sha224InitialState));
2074                         state1_size = qat_hash_get_state1_size(
2075                                         cdesc->qat_hash_alg);
2076                         break;
2077                 }
2078                 /* SHA-224 HMAC */
2079                 if (qat_ipsec_mb_lib) {
2080 #ifdef RTE_QAT_LIBIPSECMB
2081                         ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
2082                                 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2083                                 cdesc->aes_cmac);
2084 #else
2085                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
2086                         return -EFAULT;
2087 #endif
2088                 } else {
2089                         ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
2090                                 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2091                                 cdesc->aes_cmac);
2092                 }
2093
2094                 if (ret) {
2095                         QAT_LOG(ERR, "(SHA)precompute failed");
2096                         return -EFAULT;
2097                 }
2098                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
2099                 break;
2100         case ICP_QAT_HW_AUTH_ALGO_SHA256:
2101                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2102                         /* Plain SHA-256 */
2103                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
2104                                         sizeof(sha256InitialState));
2105                         state1_size = qat_hash_get_state1_size(
2106                                         cdesc->qat_hash_alg);
2107                         break;
2108                 }
2109                 /* SHA-256 HMAC */
2110                 if (qat_ipsec_mb_lib) {
2111 #ifdef RTE_QAT_LIBIPSECMB
2112                         ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
2113                                 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2114                                 cdesc->aes_cmac);
2115 #else
2116                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
2117                         return -EFAULT;
2118 #endif
2119                 } else {
2120                         ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
2121                                 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2122                                 cdesc->aes_cmac);
2123                 }
2124
2125                 if (ret) {
2126                         QAT_LOG(ERR, "(SHA)precompute failed");
2127                         return -EFAULT;
2128                 }
2129                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
2130                 break;
2131         case ICP_QAT_HW_AUTH_ALGO_SHA384:
2132                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2133                         /* Plain SHA-384 */
2134                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
2135                                         sizeof(sha384InitialState));
2136                         state1_size = qat_hash_get_state1_size(
2137                                         cdesc->qat_hash_alg);
2138                         break;
2139                 }
2140                 /* SHA-384 HMAC */
2141                 if (qat_ipsec_mb_lib) {
2142 #ifdef RTE_QAT_LIBIPSECMB
2143                         ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
2144                                 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2145                                 cdesc->aes_cmac);
2146 #else
2147                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
2148                         return -EFAULT;
2149 #endif
2150                 } else {
2151                         ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
2152                                 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2153                                 cdesc->aes_cmac);
2154                 }
2155
2156                 if (ret) {
2157                         QAT_LOG(ERR, "(SHA)precompute failed");
2158                         return -EFAULT;
2159                 }
2160                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
2161                 break;
2162         case ICP_QAT_HW_AUTH_ALGO_SHA512:
2163                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
2164                         /* Plain SHA-512 */
2165                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
2166                                         sizeof(sha512InitialState));
2167                         state1_size = qat_hash_get_state1_size(
2168                                         cdesc->qat_hash_alg);
2169                         break;
2170                 }
2171                 /* SHA-512 HMAC */
2172                 if (qat_ipsec_mb_lib) {
2173 #ifdef RTE_QAT_LIBIPSECMB
2174                         ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
2175                                 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2176                                 cdesc->aes_cmac);
2177 #else
2178                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
2179                         return -EFAULT;
2180 #endif
2181                 } else {
2182                         ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
2183                                 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2184                                 cdesc->aes_cmac);
2185                 }
2186
2187                 if (ret) {
2188                         QAT_LOG(ERR, "(SHA)precompute failed");
2189                         return -EFAULT;
2190                 }
2191                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
2192                 break;
2193         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
2194                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
2195
2196                 if (cdesc->aes_cmac)
2197                         memset(cdesc->cd_cur_ptr, 0, state1_size);
2198                 if (qat_ipsec_mb_lib) {
2199 #ifdef RTE_QAT_LIBIPSECMB
2200                         ret = qat_sym_do_precomputes_ipsec_mb(
2201                                 ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
2202                                 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
2203                                 &state2_size, cdesc->aes_cmac);
2204 #else
2205                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
2206                         return -EFAULT;
2207 #endif
2208                 } else {
2209                         ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
2210                                 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
2211                                 &state2_size, cdesc->aes_cmac);
2212                 }
2213
2214                 if (ret) {
2215                         cdesc->aes_cmac ? QAT_LOG(ERR,
2216                                                   "(CMAC)precompute failed")
2217                                         : QAT_LOG(ERR,
2218                                                   "(XCBC)precompute failed");
2219                         return -EFAULT;
2220                 }
2221                 break;
2222         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
2223         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
2224                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
2225                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
2226                 if (qat_ipsec_mb_lib) {
2227 #ifdef RTE_QAT_LIBIPSECMB
2228                         ret = qat_sym_do_precomputes_ipsec_mb(cdesc->qat_hash_alg, authkey,
2229                                 authkeylen, cdesc->cd_cur_ptr + state1_size,
2230                                 &state2_size, cdesc->aes_cmac);
2231 #else
2232                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
2233                         return -EFAULT;
2234 #endif
2235                 } else {
2236                         ret = qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
2237                                 authkeylen, cdesc->cd_cur_ptr + state1_size,
2238                                 &state2_size, cdesc->aes_cmac);
2239                 }
2240
2241                 if (ret) {
2242                         QAT_LOG(ERR, "(GCM)precompute failed");
2243                         return -EFAULT;
2244                 }
2245                 /*
2246                  * Write (the length of AAD) into bytes 16-19 of state2
2247                  * in big-endian format. This field is 8 bytes
2248                  */
2249                 auth_param->u2.aad_sz =
2250                                 RTE_ALIGN_CEIL(aad_length, 16);
2251                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
2252
2253                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
2254                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
2255                                         ICP_QAT_HW_GALOIS_H_SZ);
2256                 *aad_len = rte_bswap32(aad_length);
2257                 cdesc->aad_len = aad_length;
2258                 break;
2259         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
2260                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
2261                 state1_size = qat_hash_get_state1_size(
2262                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
2263                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
2264                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2265
2266                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
2267                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
2268                 cipherconfig->cipher_config.val =
2269                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
2270                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
2271                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
2272                         ICP_QAT_HW_CIPHER_ENCRYPT);
2273                 memcpy(cipherconfig->key, authkey, authkeylen);
2274                 memset(cipherconfig->key + authkeylen,
2275                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
2276                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
2277                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
2278                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
2279                 break;
2280         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
2281                 hash->auth_config.config =
2282                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
2283                                 cdesc->qat_hash_alg, digestsize);
2284                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
2285                 state1_size = qat_hash_get_state1_size(
2286                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
2287                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
2288                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
2289                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
2290
2291                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2292                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
2293                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
2294
2295                 break;
2296         case ICP_QAT_HW_AUTH_ALGO_MD5:
2297                 if (qat_ipsec_mb_lib) {
2298 #ifdef RTE_QAT_LIBIPSECMB
2299                         ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
2300                                 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
2301                                 cdesc->aes_cmac);
2302 #else
2303                         QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
2304                         return -EFAULT;
2305 #endif
2306                 } else {
2307                         ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
2308                                 authkeylen, cdesc->cd_cur_ptr, &state1_size,
2309                                 cdesc->aes_cmac);
2310                 }
2311
2312                 if (ret) {
2313                         QAT_LOG(ERR, "(MD5)precompute failed");
2314                         return -EFAULT;
2315                 }
2316                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
2317                 break;
2318         case ICP_QAT_HW_AUTH_ALGO_NULL:
2319                 state1_size = qat_hash_get_state1_size(
2320                                 ICP_QAT_HW_AUTH_ALGO_NULL);
2321                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
2322                 break;
2323         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
2324                 cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
2325                 state1_size = qat_hash_get_state1_size(
2326                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
2327                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
2328                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
2329
2330                 if (aad_length > 0) {
2331                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
2332                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
2333                         auth_param->u2.aad_sz =
2334                         RTE_ALIGN_CEIL(aad_length,
2335                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2336                 } else {
2337                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2338                 }
2339                 cdesc->aad_len = aad_length;
2340                 hash->auth_counter.counter = 0;
2341
2342                 hash_cd_ctrl->outer_prefix_sz = digestsize;
2343                 auth_param->hash_state_sz = digestsize;
2344
2345                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2346                 break;
2347         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2348                 state1_size = qat_hash_get_state1_size(
2349                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2350                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2351                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2352                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2353                                                         + authkeylen);
2354                 /*
2355                 * The Inner Hash Initial State2 block must contain IK
2356                 * (Initialisation Key), followed by IK XOR-ed with KM
2357                 * (Key Modifier): IK||(IK^KM).
2358                 */
2359                 /* write the auth key */
2360                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2361                 /* initialise temp key with auth key */
2362                 memcpy(pTempKey, authkey, authkeylen);
2363                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2364                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2365                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2366                 break;
2367         default:
2368                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2369                 return -EFAULT;
2370         }
2371
2372         /* Auth CD config setup */
2373         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2374         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2375         hash_cd_ctrl->inner_res_sz = digestsize;
2376         hash_cd_ctrl->final_sz = digestsize;
2377         hash_cd_ctrl->inner_state1_sz = state1_size;
2378         auth_param->auth_res_sz = digestsize;
2379
2380         hash_cd_ctrl->inner_state2_sz  = state2_size;
2381         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2382                         ((sizeof(struct icp_qat_hw_auth_setup) +
2383                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2384                                         >> 3);
2385
2386         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2387         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2388
2389         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2390         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2391
2392         return 0;
2393 }
2394
2395 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2396 {
2397         switch (key_len) {
2398         case ICP_QAT_HW_AES_128_KEY_SZ:
2399                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2400                 break;
2401         case ICP_QAT_HW_AES_192_KEY_SZ:
2402                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2403                 break;
2404         case ICP_QAT_HW_AES_256_KEY_SZ:
2405                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2406                 break;
2407         default:
2408                 return -EINVAL;
2409         }
2410         return 0;
2411 }
2412
2413 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2414                 enum icp_qat_hw_cipher_algo *alg)
2415 {
2416         switch (key_len) {
2417         case ICP_QAT_HW_AES_128_KEY_SZ:
2418                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2419                 break;
2420         case ICP_QAT_HW_AES_256_KEY_SZ:
2421                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2422                 break;
2423         default:
2424                 return -EINVAL;
2425         }
2426         return 0;
2427 }
2428
2429 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2430 {
2431         switch (key_len) {
2432         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2433                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2434                 break;
2435         default:
2436                 return -EINVAL;
2437         }
2438         return 0;
2439 }
2440
2441 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2442 {
2443         switch (key_len) {
2444         case ICP_QAT_HW_KASUMI_KEY_SZ:
2445                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2446                 break;
2447         default:
2448                 return -EINVAL;
2449         }
2450         return 0;
2451 }
2452
2453 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2454 {
2455         switch (key_len) {
2456         case ICP_QAT_HW_DES_KEY_SZ:
2457                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2458                 break;
2459         default:
2460                 return -EINVAL;
2461         }
2462         return 0;
2463 }
2464
2465 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2466 {
2467         switch (key_len) {
2468         case QAT_3DES_KEY_SZ_OPT1:
2469         case QAT_3DES_KEY_SZ_OPT2:
2470         case QAT_3DES_KEY_SZ_OPT3:
2471                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2472                 break;
2473         default:
2474                 return -EINVAL;
2475         }
2476         return 0;
2477 }
2478
2479 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2480 {
2481         switch (key_len) {
2482         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2483                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2484                 break;
2485         default:
2486                 return -EINVAL;
2487         }
2488         return 0;
2489 }
2490
2491 #ifdef RTE_LIB_SECURITY
2492 static int
2493 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2494 {
2495         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2496         struct rte_security_docsis_xform *docsis = &conf->docsis;
2497
2498         /* CRC generate -> Cipher encrypt */
2499         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2500
2501                 if (crypto_sym != NULL &&
2502                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2503                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2504                     crypto_sym->cipher.algo ==
2505                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2506                     (crypto_sym->cipher.key.length ==
2507                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2508                      crypto_sym->cipher.key.length ==
2509                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2510                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2511                     crypto_sym->next == NULL) {
2512                         return 0;
2513                 }
2514         /* Cipher decrypt -> CRC verify */
2515         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2516
2517                 if (crypto_sym != NULL &&
2518                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2519                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2520                     crypto_sym->cipher.algo ==
2521                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2522                     (crypto_sym->cipher.key.length ==
2523                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2524                      crypto_sym->cipher.key.length ==
2525                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2526                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2527                     crypto_sym->next == NULL) {
2528                         return 0;
2529                 }
2530         }
2531
2532         return -EINVAL;
2533 }
2534
2535 static int
2536 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2537                 struct rte_security_session_conf *conf, void *session_private)
2538 {
2539         int ret;
2540         int qat_cmd_id;
2541         struct rte_crypto_sym_xform *xform = NULL;
2542         struct qat_sym_session *session = session_private;
2543
2544         /* Clear the session */
2545         memset(session, 0, qat_sym_session_get_private_size(dev));
2546
2547         ret = qat_sec_session_check_docsis(conf);
2548         if (ret) {
2549                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2550                 return ret;
2551         }
2552
2553         xform = conf->crypto_xform;
2554
2555         /* Verify the session physical address is known */
2556         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2557         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2558                 QAT_LOG(ERR,
2559                         "Session physical address unknown. Bad memory pool.");
2560                 return -EINVAL;
2561         }
2562
2563         /* Set context descriptor physical address */
2564         session->cd_paddr = session_paddr +
2565                         offsetof(struct qat_sym_session, cd);
2566
2567         /* Get requested QAT command id - should be cipher */
2568         qat_cmd_id = qat_get_cmd_id(xform);
2569         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2570                 QAT_LOG(ERR, "Unsupported xform chain requested");
2571                 return -ENOTSUP;
2572         }
2573         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2574
2575         ret = qat_sym_session_configure_cipher(dev, xform, session);
2576         if (ret < 0)
2577                 return ret;
2578         qat_sym_session_finalize(session);
2579
2580         return 0;
2581 }
2582
2583 int
2584 qat_security_session_create(void *dev,
2585                                 struct rte_security_session_conf *conf,
2586                                 struct rte_security_session *sess,
2587                                 struct rte_mempool *mempool)
2588 {
2589         void *sess_private_data;
2590         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2591         struct qat_cryptodev_private *internals = cdev->data->dev_private;
2592         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
2593         struct qat_sym_session *sym_session = NULL;
2594         int ret;
2595
2596         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2597                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2598                 QAT_LOG(ERR, "Invalid security protocol");
2599                 return -EINVAL;
2600         }
2601
2602         if (rte_mempool_get(mempool, &sess_private_data)) {
2603                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2604                 return -ENOMEM;
2605         }
2606
2607         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2608                         sess_private_data);
2609         if (ret != 0) {
2610                 QAT_LOG(ERR, "Failed to configure session parameters");
2611                 /* Return session to mempool */
2612                 rte_mempool_put(mempool, sess_private_data);
2613                 return ret;
2614         }
2615
2616         set_sec_session_private_data(sess, sess_private_data);
2617         sym_session = (struct qat_sym_session *)sess_private_data;
2618         sym_session->dev_id = internals->dev_id;
2619
2620         return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
2621                         sess_private_data);
2622 }
2623
2624 int
2625 qat_security_session_destroy(void *dev __rte_unused,
2626                                  struct rte_security_session *sess)
2627 {
2628         void *sess_priv = get_sec_session_private_data(sess);
2629         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2630
2631         if (sess_priv) {
2632                 if (s->bpi_ctx)
2633                         bpi_cipher_ctx_free(s->bpi_ctx);
2634                 memset(s, 0, qat_sym_session_get_private_size(dev));
2635                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2636
2637                 set_sec_session_private_data(sess, NULL);
2638                 rte_mempool_put(sess_mp, sess_priv);
2639         }
2640         return 0;
2641 }
2642 #endif