crypto/qat: symmetric crypto session configuration
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2019 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
24
/* Initial hash-state vectors used when seeding partial-hash precomputes.
 * Byte values are the algorithms' initial words serialised big-endian;
 * sources are cited per array below.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
        0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
        0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
        0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
        0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
        0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
        0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
        0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
        0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
        0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
        0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
        0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
        0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
        0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
        0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
        0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
        0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
        0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
        0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
        0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
        0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
59
60 /** Frees a context previously created
61  *  Depends on openssl libcrypto
62  */
63 static void
64 bpi_cipher_ctx_free(void *bpi_ctx)
65 {
66         if (bpi_ctx != NULL)
67                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
68 }
69
70 /** Creates a context in either AES or DES in ECB mode
71  *  Depends on openssl libcrypto
72  */
73 static int
74 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
75                 enum rte_crypto_cipher_operation direction __rte_unused,
76                 const uint8_t *key, uint16_t key_length, void **ctx)
77 {
78         const EVP_CIPHER *algo = NULL;
79         int ret;
80         *ctx = EVP_CIPHER_CTX_new();
81
82         if (*ctx == NULL) {
83                 ret = -ENOMEM;
84                 goto ctx_init_err;
85         }
86
87         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
88                 algo = EVP_des_ecb();
89         else
90                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
91                         algo = EVP_aes_128_ecb();
92                 else
93                         algo = EVP_aes_256_ecb();
94
95         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
96         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
97                 ret = -EINVAL;
98                 goto ctx_init_err;
99         }
100
101         return 0;
102
103 ctx_init_err:
104         if (*ctx != NULL)
105                 EVP_CIPHER_CTX_free(*ctx);
106         return ret;
107 }
108
109 static int
110 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
111                 struct qat_sym_dev_private *internals)
112 {
113         int i = 0;
114         const struct rte_cryptodev_capabilities *capability;
115
116         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
117                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
118                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
119                         continue;
120
121                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
122                         continue;
123
124                 if (capability->sym.cipher.algo == algo)
125                         return 1;
126         }
127         return 0;
128 }
129
130 static int
131 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
132                 struct qat_sym_dev_private *internals)
133 {
134         int i = 0;
135         const struct rte_cryptodev_capabilities *capability;
136
137         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
138                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
139                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
140                         continue;
141
142                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
143                         continue;
144
145                 if (capability->sym.auth.algo == algo)
146                         return 1;
147         }
148         return 0;
149 }
150
151 void
152 qat_sym_session_clear(struct rte_cryptodev *dev,
153                 struct rte_cryptodev_sym_session *sess)
154 {
155         uint8_t index = dev->driver_id;
156         void *sess_priv = get_sym_session_private_data(sess, index);
157         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
158
159         if (sess_priv) {
160                 if (s->bpi_ctx)
161                         bpi_cipher_ctx_free(s->bpi_ctx);
162                 memset(s, 0, qat_sym_session_get_private_size(dev));
163                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
164
165                 set_sym_session_private_data(sess, index, NULL);
166                 rte_mempool_put(sess_mp, sess_priv);
167         }
168 }
169
170 static int
171 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
172 {
173         /* Cipher Only */
174         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
175                 return ICP_QAT_FW_LA_CMD_CIPHER;
176
177         /* Authentication Only */
178         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
179                 return ICP_QAT_FW_LA_CMD_AUTH;
180
181         /* AEAD */
182         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
183                 /* AES-GCM and AES-CCM works with different direction
184                  * GCM first encrypts and generate hash where AES-CCM
185                  * first generate hash and encrypts. Similar relation
186                  * applies to decryption.
187                  */
188                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
189                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
190                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
191                         else
192                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
193                 else
194                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
195                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
196                         else
197                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
198         }
199
200         if (xform->next == NULL)
201                 return -1;
202
203         /* Cipher then Authenticate */
204         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
205                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
206                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
207
208         /* Authenticate then Cipher */
209         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
210                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
211                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
212
213         return -1;
214 }
215
216 static struct rte_crypto_auth_xform *
217 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
218 {
219         do {
220                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
221                         return &xform->auth;
222
223                 xform = xform->next;
224         } while (xform);
225
226         return NULL;
227 }
228
229 static struct rte_crypto_cipher_xform *
230 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
231 {
232         do {
233                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234                         return &xform->cipher;
235
236                 xform = xform->next;
237         } while (xform);
238
239         return NULL;
240 }
241
242 int
243 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
244                 struct rte_crypto_sym_xform *xform,
245                 struct qat_sym_session *session)
246 {
247         struct qat_sym_dev_private *internals = dev->data->dev_private;
248         struct rte_crypto_cipher_xform *cipher_xform = NULL;
249         int ret;
250
251         /* Get cipher xform from crypto xform chain */
252         cipher_xform = qat_get_cipher_xform(xform);
253
254         session->cipher_iv.offset = cipher_xform->iv.offset;
255         session->cipher_iv.length = cipher_xform->iv.length;
256
257         switch (cipher_xform->algo) {
258         case RTE_CRYPTO_CIPHER_AES_CBC:
259                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
260                                 &session->qat_cipher_alg) != 0) {
261                         QAT_LOG(ERR, "Invalid AES cipher key size");
262                         ret = -EINVAL;
263                         goto error_out;
264                 }
265                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
266                 break;
267         case RTE_CRYPTO_CIPHER_AES_CTR:
268                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
269                                 &session->qat_cipher_alg) != 0) {
270                         QAT_LOG(ERR, "Invalid AES cipher key size");
271                         ret = -EINVAL;
272                         goto error_out;
273                 }
274                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
275                 break;
276         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
277                 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
278                                         &session->qat_cipher_alg) != 0) {
279                         QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
280                         ret = -EINVAL;
281                         goto error_out;
282                 }
283                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
284                 break;
285         case RTE_CRYPTO_CIPHER_NULL:
286                 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
287                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
288                 break;
289         case RTE_CRYPTO_CIPHER_KASUMI_F8:
290                 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
291                                         &session->qat_cipher_alg) != 0) {
292                         QAT_LOG(ERR, "Invalid KASUMI cipher key size");
293                         ret = -EINVAL;
294                         goto error_out;
295                 }
296                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
297                 break;
298         case RTE_CRYPTO_CIPHER_3DES_CBC:
299                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
300                                 &session->qat_cipher_alg) != 0) {
301                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
302                         ret = -EINVAL;
303                         goto error_out;
304                 }
305                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
306                 break;
307         case RTE_CRYPTO_CIPHER_DES_CBC:
308                 if (qat_sym_validate_des_key(cipher_xform->key.length,
309                                 &session->qat_cipher_alg) != 0) {
310                         QAT_LOG(ERR, "Invalid DES cipher key size");
311                         ret = -EINVAL;
312                         goto error_out;
313                 }
314                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
315                 break;
316         case RTE_CRYPTO_CIPHER_3DES_CTR:
317                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
318                                 &session->qat_cipher_alg) != 0) {
319                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
320                         ret = -EINVAL;
321                         goto error_out;
322                 }
323                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
324                 break;
325         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
326                 ret = bpi_cipher_ctx_init(
327                                         cipher_xform->algo,
328                                         cipher_xform->op,
329                                         cipher_xform->key.data,
330                                         cipher_xform->key.length,
331                                         &session->bpi_ctx);
332                 if (ret != 0) {
333                         QAT_LOG(ERR, "failed to create DES BPI ctx");
334                         goto error_out;
335                 }
336                 if (qat_sym_validate_des_key(cipher_xform->key.length,
337                                 &session->qat_cipher_alg) != 0) {
338                         QAT_LOG(ERR, "Invalid DES cipher key size");
339                         ret = -EINVAL;
340                         goto error_out;
341                 }
342                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
343                 break;
344         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
345                 ret = bpi_cipher_ctx_init(
346                                         cipher_xform->algo,
347                                         cipher_xform->op,
348                                         cipher_xform->key.data,
349                                         cipher_xform->key.length,
350                                         &session->bpi_ctx);
351                 if (ret != 0) {
352                         QAT_LOG(ERR, "failed to create AES BPI ctx");
353                         goto error_out;
354                 }
355                 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
356                                 &session->qat_cipher_alg) != 0) {
357                         QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
358                         ret = -EINVAL;
359                         goto error_out;
360                 }
361                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
362                 break;
363         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
364                 if (!qat_is_cipher_alg_supported(
365                         cipher_xform->algo, internals)) {
366                         QAT_LOG(ERR, "%s not supported on this device",
367                                 rte_crypto_cipher_algorithm_strings
368                                         [cipher_xform->algo]);
369                         ret = -ENOTSUP;
370                         goto error_out;
371                 }
372                 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
373                                 &session->qat_cipher_alg) != 0) {
374                         QAT_LOG(ERR, "Invalid ZUC cipher key size");
375                         ret = -EINVAL;
376                         goto error_out;
377                 }
378                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
379                 break;
380         case RTE_CRYPTO_CIPHER_AES_XTS:
381                 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
382                         QAT_LOG(ERR, "AES-XTS-192 not supported");
383                         ret = -EINVAL;
384                         goto error_out;
385                 }
386                 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
387                                 &session->qat_cipher_alg) != 0) {
388                         QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
389                         ret = -EINVAL;
390                         goto error_out;
391                 }
392                 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
393                 break;
394         case RTE_CRYPTO_CIPHER_3DES_ECB:
395         case RTE_CRYPTO_CIPHER_AES_ECB:
396         case RTE_CRYPTO_CIPHER_AES_F8:
397         case RTE_CRYPTO_CIPHER_ARC4:
398                 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
399                                 cipher_xform->algo);
400                 ret = -ENOTSUP;
401                 goto error_out;
402         default:
403                 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
404                                 cipher_xform->algo);
405                 ret = -EINVAL;
406                 goto error_out;
407         }
408
409         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
410                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
411         else
412                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
413
414         if (qat_sym_session_aead_create_cd_cipher(session,
415                                                 cipher_xform->key.data,
416                                                 cipher_xform->key.length)) {
417                 ret = -EINVAL;
418                 goto error_out;
419         }
420
421         return 0;
422
423 error_out:
424         if (session->bpi_ctx) {
425                 bpi_cipher_ctx_free(session->bpi_ctx);
426                 session->bpi_ctx = NULL;
427         }
428         return ret;
429 }
430
431 int
432 qat_sym_session_configure(struct rte_cryptodev *dev,
433                 struct rte_crypto_sym_xform *xform,
434                 struct rte_cryptodev_sym_session *sess,
435                 struct rte_mempool *mempool)
436 {
437         void *sess_private_data;
438         int ret;
439
440         if (rte_mempool_get(mempool, &sess_private_data)) {
441                 CDEV_LOG_ERR(
442                         "Couldn't get object from session mempool");
443                 return -ENOMEM;
444         }
445
446         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
447         if (ret != 0) {
448                 QAT_LOG(ERR,
449                     "Crypto QAT PMD: failed to configure session parameters");
450
451                 /* Return session to mempool */
452                 rte_mempool_put(mempool, sess_private_data);
453                 return ret;
454         }
455
456         set_sym_session_private_data(sess, dev->driver_id,
457                 sess_private_data);
458
459         return 0;
460 }
461
/** Enable extended protocol flags in the firmware request template and set
 *  per-algorithm hash/proto flags, used for "mixed" wireless cipher+hash
 *  sessions (SNOW 3G / ZUC combined with other algorithms).
 *
 *  @param session session whose cached firmware request is patched.
 *  @param hash_flag hash flag bit(s) to OR into the content descriptor
 *         control (LW 28); 0 clears nothing and only sets proto flags.
 */
static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		/* SNOW 3G cipher uses the legacy proto field; clear the
		 * separate ZUC proto flag.
		 */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		/* ZUC cipher is signalled via the dedicated ZUC proto flag,
		 * not the legacy proto field.
		 */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		/* Any other cipher: clear both proto indications. */
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}
503
/** Apply special handling for "mixed" wireless cipher+hash sessions.
 *
 *  A session is "mixed" when the hash is a wireless algorithm (ZUC EIA3 or
 *  SNOW 3G UIA2) but the cipher is from a different family, or vice versa
 *  (AES-CMAC / NULL hash with a SNOW 3G or ZUC cipher). Such sessions need
 *  the extended hash flags set and a raised minimum device generation:
 *  GEN2 when the device advertises QAT_SYM_CAP_MIXED_CRYPTO, else GEN3.
 *  Non-mixed sessions are left untouched.
 */
static void
qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
		struct qat_sym_session *session)
{
	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;

	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		/* ZUC hash with a non-ZUC cipher */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		/* SNOW 3G hash with a non-SNOW 3G cipher */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
	} else if ((session->aes_cmac ||
			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
			(session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
		/* AES-CMAC or NULL hash with a wireless cipher: no extra
		 * hash flag needed, but extended flags must still be enabled.
		 */
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session, 0);
	}
}
534
535 int
536 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
537                 struct rte_crypto_sym_xform *xform, void *session_private)
538 {
539         struct qat_sym_session *session = session_private;
540         struct qat_sym_dev_private *internals = dev->data->dev_private;
541         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
542         int ret;
543         int qat_cmd_id;
544
545         /* Verify the session physical address is known */
546         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
547         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
548                 QAT_LOG(ERR,
549                         "Session physical address unknown. Bad memory pool.");
550                 return -EINVAL;
551         }
552
553         /* Set context descriptor physical address */
554         session->cd_paddr = session_paddr +
555                         offsetof(struct qat_sym_session, cd);
556
557         session->min_qat_dev_gen = QAT_GEN1;
558
559         /* Get requested QAT command id */
560         qat_cmd_id = qat_get_cmd_id(xform);
561         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
562                 QAT_LOG(ERR, "Unsupported xform chain requested");
563                 return -ENOTSUP;
564         }
565         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
566         switch (session->qat_cmd) {
567         case ICP_QAT_FW_LA_CMD_CIPHER:
568                 ret = qat_sym_session_configure_cipher(dev, xform, session);
569                 if (ret < 0)
570                         return ret;
571                 break;
572         case ICP_QAT_FW_LA_CMD_AUTH:
573                 ret = qat_sym_session_configure_auth(dev, xform, session);
574                 if (ret < 0)
575                         return ret;
576                 session->is_single_pass_gmac =
577                                qat_dev_gen == QAT_GEN3 &&
578                                xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
579                                xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
580                 break;
581         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
582                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
583                         ret = qat_sym_session_configure_aead(dev, xform,
584                                         session);
585                         if (ret < 0)
586                                 return ret;
587                 } else {
588                         ret = qat_sym_session_configure_cipher(dev,
589                                         xform, session);
590                         if (ret < 0)
591                                 return ret;
592                         ret = qat_sym_session_configure_auth(dev,
593                                         xform, session);
594                         if (ret < 0)
595                                 return ret;
596                         /* Special handling of mixed hash+cipher algorithms */
597                         qat_sym_session_handle_mixed(dev, session);
598                 }
599                 break;
600         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
601                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
602                         ret = qat_sym_session_configure_aead(dev, xform,
603                                         session);
604                         if (ret < 0)
605                                 return ret;
606                 } else {
607                         ret = qat_sym_session_configure_auth(dev,
608                                         xform, session);
609                         if (ret < 0)
610                                 return ret;
611                         ret = qat_sym_session_configure_cipher(dev,
612                                         xform, session);
613                         if (ret < 0)
614                                 return ret;
615                         /* Special handling of mixed hash+cipher algorithms */
616                         qat_sym_session_handle_mixed(dev, session);
617                 }
618                 break;
619         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
620         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
621         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
622         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
623         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
624         case ICP_QAT_FW_LA_CMD_MGF1:
625         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
626         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
627         case ICP_QAT_FW_LA_CMD_DELIMITER:
628         QAT_LOG(ERR, "Unsupported Service %u",
629                 session->qat_cmd);
630                 return -ENOTSUP;
631         default:
632         QAT_LOG(ERR, "Unsupported Service %u",
633                 session->qat_cmd);
634                 return -ENOTSUP;
635         }
636
637         return 0;
638 }
639
/** Convert an AEAD session into a GEN3 single-pass (SPC) cipher session.
 *
 *  Single-pass mode performs cipher and authentication in one firmware
 *  CIPHER command instead of a chained cipher+hash, so the cached request
 *  template and cipher config are rewritten accordingly.
 *
 *  @param session session to convert; min_qat_dev_gen is forced to GEN3.
 *  @param aead_xform AEAD parameters (key, IV, AAD and digest lengths).
 *  @return 0 on success, -EINVAL if the cipher content descriptor cannot
 *          be built.
 */
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
		struct rte_crypto_aead_xform *aead_xform)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *) &session->fw_req.serv_specif_rqpars;

	session->is_single_pass = 1;
	session->min_qat_dev_gen = QAT_GEN3;
	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		/* GCM single-pass uses the dedicated AEAD cipher mode and
		 * flags a 12-octet IV in the request header.
		 */
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
	} else {
		/* Chacha-Poly is special case that use QAT CTR mode */
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	}
	session->cipher_iv.offset = aead_xform->iv.offset;
	session->cipher_iv.length = aead_xform->iv.length;
	if (qat_sym_session_aead_create_cd_cipher(session,
			aead_xform->key.data, aead_xform->key.length))
		return -EINVAL;
	session->aad_len = aead_xform->aad_length;
	session->digest_length = aead_xform->digest_length;
	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		/* Encrypt: generate the auth tag and return it. */
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
		ICP_QAT_FW_LA_RET_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_RET_AUTH_RES);
	} else {
		/* Decrypt: verify the supplied auth tag. */
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
		ICP_QAT_FW_LA_CMP_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_CMP_AUTH_RES);
	}
	/* Mark the request as single-pass and clear the legacy proto field. */
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
	session->fw_req.comn_hdr.service_cmd_id =
			ICP_QAT_FW_LA_CMD_CIPHER;
	/* Rebuild the cipher config word: AEAD mode, selected algorithm,
	 * no key conversion, current direction; digest length is packed
	 * into the hash-compare-length field.
	 */
	session->cd.cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				session->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				session->qat_dir);
	QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
			aead_xform->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	session->cd.cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
				aead_xform->aad_length);
	cipher_param->spc_aad_sz = aead_xform->aad_length;
	cipher_param->spc_auth_res_sz = aead_xform->digest_length;

	return 0;
}
705
/*
 * Populate the authentication fields of a QAT session from an auth xform.
 *
 * Maps the rte_crypto auth algorithm onto the QAT hardware hash algorithm
 * and mode, stores key/IV/digest metadata in the session, and builds the
 * auth content descriptor (plus a cipher descriptor for AES-GMAC, which
 * is implemented on top of the GCM engine).
 *
 * Returns 0 on success, -EINVAL on invalid parameters or descriptor
 * build failure, -ENOTSUP for algorithms this device cannot handle.
 */
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	/* Default to MODE1; plain (unkeyed) hashes override to MODE0 below */
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		/* CMAC reuses the XCBC-MAC hw algo; aes_cmac selects the
		 * CMAC-specific subkey precompute path later on.
		 */
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/* Zero IV length means the default GCM J0 IV size is used */
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		/* ZUC support varies per device generation - check capability */
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */

			if (qat_sym_session_aead_create_cd_auth(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_sym_session_aead_create_cd_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}
865
/*
 * Populate the AEAD fields of a QAT session from an AEAD xform.
 *
 * Selects the QAT cipher/hash algorithm pair for GCM, CCM or
 * ChaCha20-Poly1305, diverting to the single-pass path where the
 * hardware supports it, and builds the cipher and auth content
 * descriptors in the order the chosen direction requires.
 *
 * Returns 0 on success, -EINVAL on invalid key/algorithm or
 * descriptor build failure.
 */
int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_sym_dev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/*
		 * Devices newer than GEN2 with the single-pass GCM IV size
		 * take the single-pass (combined cipher+auth) path.
		 */
		if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
				QAT_AES_GCM_SPC_IV_SIZE) {
			return qat_sym_session_handle_single_pass(session,
					aead_xform);
		}
		/* Zero IV length means the default GCM J0 IV size is used */
		if (session->cipher_iv.length == 0)
			session->cipher_iv.length = AES_GCM_J0_LEN;

		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		break;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
			return -EINVAL;
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		/* ChaCha20-Poly1305 is always handled single-pass */
		return qat_sym_session_handle_single_pass(session,
						aead_xform);
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	/*
	 * GCM-encrypt and CCM-decrypt both need the cipher descriptor
	 * built before the auth descriptor; the other two combinations
	 * need the opposite order.
	 */
	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * It needs to create cipher desc content first,
		 * then authentication
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * It needs to create authentication desc content first,
		 * then cipher
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	session->digest_length = aead_xform->digest_length;
	return 0;
}
979
980 unsigned int qat_sym_session_get_private_size(
981                 struct rte_cryptodev *dev __rte_unused)
982 {
983         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
984 }
985
986 /* returns block size in bytes per cipher algo */
987 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
988 {
989         switch (qat_cipher_alg) {
990         case ICP_QAT_HW_CIPHER_ALGO_DES:
991                 return ICP_QAT_HW_DES_BLK_SZ;
992         case ICP_QAT_HW_CIPHER_ALGO_3DES:
993                 return ICP_QAT_HW_3DES_BLK_SZ;
994         case ICP_QAT_HW_CIPHER_ALGO_AES128:
995         case ICP_QAT_HW_CIPHER_ALGO_AES192:
996         case ICP_QAT_HW_CIPHER_ALGO_AES256:
997                 return ICP_QAT_HW_AES_BLK_SZ;
998         default:
999                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1000                 return -EFAULT;
1001         };
1002         return -EFAULT;
1003 }
1004
1005 /*
1006  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1007  * This is digest size rounded up to nearest quadword
1008  */
1009 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1010 {
1011         switch (qat_hash_alg) {
1012         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1013                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1014                                                 QAT_HW_DEFAULT_ALIGNMENT);
1015         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1016                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1017                                                 QAT_HW_DEFAULT_ALIGNMENT);
1018         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1019                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1020                                                 QAT_HW_DEFAULT_ALIGNMENT);
1021         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1022                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1023                                                 QAT_HW_DEFAULT_ALIGNMENT);
1024         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1025                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1026                                                 QAT_HW_DEFAULT_ALIGNMENT);
1027         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1028                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1029                                                 QAT_HW_DEFAULT_ALIGNMENT);
1030         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1031         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1032                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1033                                                 QAT_HW_DEFAULT_ALIGNMENT);
1034         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1035                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1036                                                 QAT_HW_DEFAULT_ALIGNMENT);
1037         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1038                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1039                                                 QAT_HW_DEFAULT_ALIGNMENT);
1040         case ICP_QAT_HW_AUTH_ALGO_MD5:
1041                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1042                                                 QAT_HW_DEFAULT_ALIGNMENT);
1043         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1044                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1045                                                 QAT_HW_DEFAULT_ALIGNMENT);
1046         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1047                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1048                                                 QAT_HW_DEFAULT_ALIGNMENT);
1049         case ICP_QAT_HW_AUTH_ALGO_NULL:
1050                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1051                                                 QAT_HW_DEFAULT_ALIGNMENT);
1052         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1053                 /* return maximum state1 size in this case */
1054                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1055                                                 QAT_HW_DEFAULT_ALIGNMENT);
1056         default:
1057                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1058                 return -EFAULT;
1059         };
1060         return -EFAULT;
1061 }
1062
1063 /* returns digest size in bytes  per hash algo */
1064 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1065 {
1066         switch (qat_hash_alg) {
1067         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1068                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1069         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1070                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1071         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1072                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1073         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1074                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1075         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1076                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1077         case ICP_QAT_HW_AUTH_ALGO_MD5:
1078                 return ICP_QAT_HW_MD5_STATE1_SZ;
1079         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1080                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1081         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1082                 /* return maximum digest size in this case */
1083                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1084         default:
1085                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1086                 return -EFAULT;
1087         };
1088         return -EFAULT;
1089 }
1090
1091 /* returns block size in byes per hash algo */
1092 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1093 {
1094         switch (qat_hash_alg) {
1095         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1096                 return SHA_CBLOCK;
1097         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1098                 return SHA256_CBLOCK;
1099         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1100                 return SHA256_CBLOCK;
1101         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1102                 return SHA512_CBLOCK;
1103         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1104                 return SHA512_CBLOCK;
1105         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1106                 return 16;
1107         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1108                 return ICP_QAT_HW_AES_BLK_SZ;
1109         case ICP_QAT_HW_AUTH_ALGO_MD5:
1110                 return MD5_CBLOCK;
1111         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1112                 /* return maximum block size in this case */
1113                 return SHA512_CBLOCK;
1114         default:
1115                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1116                 return -EFAULT;
1117         };
1118         return -EFAULT;
1119 }
1120
1121 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1122 {
1123         SHA_CTX ctx;
1124
1125         if (!SHA1_Init(&ctx))
1126                 return -EFAULT;
1127         SHA1_Transform(&ctx, data_in);
1128         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1129         return 0;
1130 }
1131
1132 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1133 {
1134         SHA256_CTX ctx;
1135
1136         if (!SHA224_Init(&ctx))
1137                 return -EFAULT;
1138         SHA256_Transform(&ctx, data_in);
1139         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1140         return 0;
1141 }
1142
1143 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1144 {
1145         SHA256_CTX ctx;
1146
1147         if (!SHA256_Init(&ctx))
1148                 return -EFAULT;
1149         SHA256_Transform(&ctx, data_in);
1150         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1151         return 0;
1152 }
1153
1154 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1155 {
1156         SHA512_CTX ctx;
1157
1158         if (!SHA384_Init(&ctx))
1159                 return -EFAULT;
1160         SHA512_Transform(&ctx, data_in);
1161         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1162         return 0;
1163 }
1164
1165 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1166 {
1167         SHA512_CTX ctx;
1168
1169         if (!SHA512_Init(&ctx))
1170                 return -EFAULT;
1171         SHA512_Transform(&ctx, data_in);
1172         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1173         return 0;
1174 }
1175
1176 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1177 {
1178         MD5_CTX ctx;
1179
1180         if (!MD5_Init(&ctx))
1181                 return -EFAULT;
1182         MD5_Transform(&ctx, data_in);
1183         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1184
1185         return 0;
1186 }
1187
1188 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1189                         uint8_t *data_in,
1190                         uint8_t *data_out)
1191 {
1192         int digest_size;
1193         uint8_t digest[qat_hash_get_digest_size(
1194                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1195         uint32_t *hash_state_out_be32;
1196         uint64_t *hash_state_out_be64;
1197         int i;
1198
1199         /* Initialize to avoid gcc warning */
1200         memset(digest, 0, sizeof(digest));
1201
1202         digest_size = qat_hash_get_digest_size(hash_alg);
1203         if (digest_size <= 0)
1204                 return -EFAULT;
1205
1206         hash_state_out_be32 = (uint32_t *)data_out;
1207         hash_state_out_be64 = (uint64_t *)data_out;
1208
1209         switch (hash_alg) {
1210         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1211                 if (partial_hash_sha1(data_in, digest))
1212                         return -EFAULT;
1213                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1214                         *hash_state_out_be32 =
1215                                 rte_bswap32(*(((uint32_t *)digest)+i));
1216                 break;
1217         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1218                 if (partial_hash_sha224(data_in, digest))
1219                         return -EFAULT;
1220                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1221                         *hash_state_out_be32 =
1222                                 rte_bswap32(*(((uint32_t *)digest)+i));
1223                 break;
1224         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1225                 if (partial_hash_sha256(data_in, digest))
1226                         return -EFAULT;
1227                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1228                         *hash_state_out_be32 =
1229                                 rte_bswap32(*(((uint32_t *)digest)+i));
1230                 break;
1231         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1232                 if (partial_hash_sha384(data_in, digest))
1233                         return -EFAULT;
1234                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1235                         *hash_state_out_be64 =
1236                                 rte_bswap64(*(((uint64_t *)digest)+i));
1237                 break;
1238         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1239                 if (partial_hash_sha512(data_in, digest))
1240                         return -EFAULT;
1241                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1242                         *hash_state_out_be64 =
1243                                 rte_bswap64(*(((uint64_t *)digest)+i));
1244                 break;
1245         case ICP_QAT_HW_AUTH_ALGO_MD5:
1246                 if (partial_hash_md5(data_in, data_out))
1247                         return -EFAULT;
1248                 break;
1249         default:
1250                 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1251                 return -EFAULT;
1252         }
1253
1254         return 0;
1255 }
1256 #define HMAC_IPAD_VALUE 0x36
1257 #define HMAC_OPAD_VALUE 0x5c
1258 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1259
1260 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1261
1262 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1263 {
1264         int i;
1265
1266         derived[0] = base[0] << 1;
1267         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1268                 derived[i] = base[i] << 1;
1269                 derived[i - 1] |= base[i] >> 7;
1270         }
1271
1272         if (base[0] & 0x80)
1273                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1274 }
1275
1276 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1277                                 const uint8_t *auth_key,
1278                                 uint16_t auth_keylen,
1279                                 uint8_t *p_state_buf,
1280                                 uint16_t *p_state_len,
1281                                 uint8_t aes_cmac)
1282 {
1283         int block_size;
1284         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1285         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1286         int i;
1287
1288         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1289
1290                 /* CMAC */
1291                 if (aes_cmac) {
1292                         AES_KEY enc_key;
1293                         uint8_t *in = NULL;
1294                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1295                         uint8_t *k1, *k2;
1296
1297                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1298
1299                         in = rte_zmalloc("AES CMAC K1",
1300                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1301
1302                         if (in == NULL) {
1303                                 QAT_LOG(ERR, "Failed to alloc memory");
1304                                 return -ENOMEM;
1305                         }
1306
1307                         rte_memcpy(in, AES_CMAC_SEED,
1308                                    ICP_QAT_HW_AES_128_KEY_SZ);
1309                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1310
1311                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1312                                 &enc_key) != 0) {
1313                                 rte_free(in);
1314                                 return -EFAULT;
1315                         }
1316
1317                         AES_encrypt(in, k0, &enc_key);
1318
1319                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1320                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1321
1322                         aes_cmac_key_derive(k0, k1);
1323                         aes_cmac_key_derive(k1, k2);
1324
1325                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1326                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1327                         rte_free(in);
1328                         return 0;
1329                 } else {
1330                         static uint8_t qat_aes_xcbc_key_seed[
1331                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1332                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1333                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1334                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1335                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1336                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1337                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1338                         };
1339
1340                         uint8_t *in = NULL;
1341                         uint8_t *out = p_state_buf;
1342                         int x;
1343                         AES_KEY enc_key;
1344
1345                         in = rte_zmalloc("working mem for key",
1346                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1347                         if (in == NULL) {
1348                                 QAT_LOG(ERR, "Failed to alloc memory");
1349                                 return -ENOMEM;
1350                         }
1351
1352                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1353                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1354                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1355                                 if (AES_set_encrypt_key(auth_key,
1356                                                         auth_keylen << 3,
1357                                                         &enc_key) != 0) {
1358                                         rte_free(in -
1359                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1360                                         memset(out -
1361                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1362                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1363                                         return -EFAULT;
1364                                 }
1365                                 AES_encrypt(in, out, &enc_key);
1366                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1367                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1368                         }
1369                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1370                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1371                         return 0;
1372                 }
1373
1374         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1375                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1376                 uint8_t *in = NULL;
1377                 uint8_t *out = p_state_buf;
1378                 AES_KEY enc_key;
1379
1380                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1381                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1382                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1383                 in = rte_zmalloc("working mem for key",
1384                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1385                 if (in == NULL) {
1386                         QAT_LOG(ERR, "Failed to alloc memory");
1387                         return -ENOMEM;
1388                 }
1389
1390                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1391                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1392                         &enc_key) != 0) {
1393                         return -EFAULT;
1394                 }
1395                 AES_encrypt(in, out, &enc_key);
1396                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1397                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1398                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1399                 rte_free(in);
1400                 return 0;
1401         }
1402
1403         block_size = qat_hash_get_block_size(hash_alg);
1404         if (block_size < 0)
1405                 return block_size;
1406         /* init ipad and opad from key and xor with fixed values */
1407         memset(ipad, 0, block_size);
1408         memset(opad, 0, block_size);
1409
1410         if (auth_keylen > (unsigned int)block_size) {
1411                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1412                 return -EFAULT;
1413         }
1414         rte_memcpy(ipad, auth_key, auth_keylen);
1415         rte_memcpy(opad, auth_key, auth_keylen);
1416
1417         for (i = 0; i < block_size; i++) {
1418                 uint8_t *ipad_ptr = ipad + i;
1419                 uint8_t *opad_ptr = opad + i;
1420                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1421                 *opad_ptr ^= HMAC_OPAD_VALUE;
1422         }
1423
1424         /* do partial hash of ipad and copy to state1 */
1425         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1426                 memset(ipad, 0, block_size);
1427                 memset(opad, 0, block_size);
1428                 QAT_LOG(ERR, "ipad precompute failed");
1429                 return -EFAULT;
1430         }
1431
1432         /*
1433          * State len is a multiple of 8, so may be larger than the digest.
1434          * Put the partial hash of opad state_len bytes after state1
1435          */
1436         *p_state_len = qat_hash_get_state1_size(hash_alg);
1437         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1438                 memset(ipad, 0, block_size);
1439                 memset(opad, 0, block_size);
1440                 QAT_LOG(ERR, "opad precompute failed");
1441                 return -EFAULT;
1442         }
1443
1444         /*  don't leave data lying around */
1445         memset(ipad, 0, block_size);
1446         memset(opad, 0, block_size);
1447         return 0;
1448 }
1449
1450 static void
1451 qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
1452                 enum qat_sym_proto_flag proto_flags)
1453 {
1454         header->hdr_flags =
1455                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1456         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1457         header->comn_req_flags =
1458                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1459                                         QAT_COMN_PTR_TYPE_FLAT);
1460         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1461                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1462         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1463                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1464
1465         switch (proto_flags)            {
1466         case QAT_CRYPTO_PROTO_FLAG_NONE:
1467                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1468                                         ICP_QAT_FW_LA_NO_PROTO);
1469                 break;
1470         case QAT_CRYPTO_PROTO_FLAG_CCM:
1471                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1472                                         ICP_QAT_FW_LA_CCM_PROTO);
1473                 break;
1474         case QAT_CRYPTO_PROTO_FLAG_GCM:
1475                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1476                                         ICP_QAT_FW_LA_GCM_PROTO);
1477                 break;
1478         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1479                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1480                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1481                 break;
1482         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1483                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1484                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1485                 break;
1486         }
1487
1488         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1489                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1490         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1491                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1492 }
1493
/*
 * Snow3G and ZUC must never use this function; they set their
 * protocol flag directly in both the cipher and auth parts of the
 * content descriptor build functions.
 */
1499 static enum qat_sym_proto_flag
1500 qat_get_crypto_proto_flag(uint16_t flags)
1501 {
1502         int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
1503         enum qat_sym_proto_flag qat_proto_flag =
1504                         QAT_CRYPTO_PROTO_FLAG_NONE;
1505
1506         switch (proto) {
1507         case ICP_QAT_FW_LA_GCM_PROTO:
1508                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1509                 break;
1510         case ICP_QAT_FW_LA_CCM_PROTO:
1511                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1512                 break;
1513         }
1514
1515         return qat_proto_flag;
1516 }
1517
/*
 * Build the cipher portion of a session's content descriptor.
 *
 * Writes the cipher control header and common request header in the
 * firmware request template, then appends the cipher config word and
 * key material at cdesc->cd_cur_ptr, advancing the pointer past
 * everything written and updating the CD size in the request params.
 *
 * @param cdesc         session being set up; qat_cmd, qat_mode,
 *                      qat_dir and qat_cipher_alg must already be set
 * @param cipherkey     raw cipher key from the application
 * @param cipherkeylen  key length in bytes
 * @return 0 on success, -EFAULT if qat_cmd is not a cipher command
 */
int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* The cd_ctrl area is viewed through both the cipher and the
	 * auth control-header layouts, depending on the chained command.
	 */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	enum qat_sym_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* Cipher only: single slice, straight to DRAM write-back;
		 * no auth result is produced or compared.
		 */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* Cipher-then-hash: chain cipher slice into auth slice. */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* For HASH_CIPHER the slice chain and cd_cur_ptr were
		 * already set up by the auth CD build function; anything
		 * else is not a cipher command.
		 */
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		/* SNOW3G and ZUC always take the converted-key path. */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizing and protocol selection. */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		/* Protocol (e.g. CCM/GCM) may already be set in the header. */
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		/* ZUC requires at least a GEN2 QAT device. */
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		/* Default (AES-family) sizing: key as supplied. */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	}
	/* Hardware sizes/offsets are expressed in 8-byte quadwords. */
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_sym_session_init_common_hdr(header, qat_proto_flag);

	/* Write cipher config word + key at the current CD position. */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 stores the key twice: plain, then XOR-ed with
		 * the key modifier.
		 */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	/* Pad the key area out to the hardware's expected key size. */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	/* Record final CD size (rounded up to quadwords) in the request. */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
1672
1673 int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
1674                                                 const uint8_t *authkey,
1675                                                 uint32_t authkeylen,
1676                                                 uint32_t aad_length,
1677                                                 uint32_t digestsize,
1678                                                 unsigned int operation)
1679 {
1680         struct icp_qat_hw_auth_setup *hash;
1681         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1682         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1683         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1684         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1685         void *ptr = &req_tmpl->cd_ctrl;
1686         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1687         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1688         struct icp_qat_fw_la_auth_req_params *auth_param =
1689                 (struct icp_qat_fw_la_auth_req_params *)
1690                 ((char *)&req_tmpl->serv_specif_rqpars +
1691                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1692         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1693         uint16_t hash_offset, cd_size;
1694         uint32_t *aad_len = NULL;
1695         uint32_t wordIndex  = 0;
1696         uint32_t *pTempKey;
1697         enum qat_sym_proto_flag qat_proto_flag =
1698                 QAT_CRYPTO_PROTO_FLAG_NONE;
1699
1700         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1701                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1702                                         ICP_QAT_FW_SLICE_AUTH);
1703                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1704                                         ICP_QAT_FW_SLICE_DRAM_WR);
1705                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1706         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1707                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1708                                 ICP_QAT_FW_SLICE_AUTH);
1709                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1710                                 ICP_QAT_FW_SLICE_CIPHER);
1711                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1712                                 ICP_QAT_FW_SLICE_CIPHER);
1713                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1714                                 ICP_QAT_FW_SLICE_DRAM_WR);
1715                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1716         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1717                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1718                 return -EFAULT;
1719         }
1720
1721         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1722                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1723                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1724                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1725                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
1726                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1727         } else {
1728                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1729                                            ICP_QAT_FW_LA_RET_AUTH_RES);
1730                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1731                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1732                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1733         }
1734
1735         /*
1736          * Setup the inner hash config
1737          */
1738         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1739         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1740         hash->auth_config.reserved = 0;
1741         hash->auth_config.config =
1742                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1743                                 cdesc->qat_hash_alg, digestsize);
1744
1745         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1746                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1747                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1748                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1749                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1750                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1751                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1752                         )
1753                 hash->auth_counter.counter = 0;
1754         else {
1755                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1756
1757                 if (block_size < 0)
1758                         return block_size;
1759                 hash->auth_counter.counter = rte_bswap32(block_size);
1760         }
1761
1762         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1763
1764         /*
1765          * cd_cur_ptr now points at the state1 information.
1766          */
1767         switch (cdesc->qat_hash_alg) {
1768         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1769                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1770                         /* Plain SHA-1 */
1771                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1772                                         sizeof(sha1InitialState));
1773                         state1_size = qat_hash_get_state1_size(
1774                                         cdesc->qat_hash_alg);
1775                         break;
1776                 }
1777                 /* SHA-1 HMAC */
1778                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1779                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1780                         cdesc->aes_cmac)) {
1781                         QAT_LOG(ERR, "(SHA)precompute failed");
1782                         return -EFAULT;
1783                 }
1784                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1785                 break;
1786         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1787                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1788                         /* Plain SHA-224 */
1789                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1790                                         sizeof(sha224InitialState));
1791                         state1_size = qat_hash_get_state1_size(
1792                                         cdesc->qat_hash_alg);
1793                         break;
1794                 }
1795                 /* SHA-224 HMAC */
1796                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1797                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1798                         cdesc->aes_cmac)) {
1799                         QAT_LOG(ERR, "(SHA)precompute failed");
1800                         return -EFAULT;
1801                 }
1802                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1803                 break;
1804         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1805                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1806                         /* Plain SHA-256 */
1807                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1808                                         sizeof(sha256InitialState));
1809                         state1_size = qat_hash_get_state1_size(
1810                                         cdesc->qat_hash_alg);
1811                         break;
1812                 }
1813                 /* SHA-256 HMAC */
1814                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1815                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1816                         cdesc->aes_cmac)) {
1817                         QAT_LOG(ERR, "(SHA)precompute failed");
1818                         return -EFAULT;
1819                 }
1820                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1821                 break;
1822         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1823                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1824                         /* Plain SHA-384 */
1825                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1826                                         sizeof(sha384InitialState));
1827                         state1_size = qat_hash_get_state1_size(
1828                                         cdesc->qat_hash_alg);
1829                         break;
1830                 }
1831                 /* SHA-384 HMAC */
1832                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1833                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1834                         cdesc->aes_cmac)) {
1835                         QAT_LOG(ERR, "(SHA)precompute failed");
1836                         return -EFAULT;
1837                 }
1838                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1839                 break;
1840         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1841                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1842                         /* Plain SHA-512 */
1843                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1844                                         sizeof(sha512InitialState));
1845                         state1_size = qat_hash_get_state1_size(
1846                                         cdesc->qat_hash_alg);
1847                         break;
1848                 }
1849                 /* SHA-512 HMAC */
1850                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1851                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1852                         cdesc->aes_cmac)) {
1853                         QAT_LOG(ERR, "(SHA)precompute failed");
1854                         return -EFAULT;
1855                 }
1856                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1857                 break;
1858         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1859                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1860
1861                 if (cdesc->aes_cmac)
1862                         memset(cdesc->cd_cur_ptr, 0, state1_size);
1863                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1864                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1865                         &state2_size, cdesc->aes_cmac)) {
1866                         cdesc->aes_cmac ? QAT_LOG(ERR,
1867                                                   "(CMAC)precompute failed")
1868                                         : QAT_LOG(ERR,
1869                                                   "(XCBC)precompute failed");
1870                         return -EFAULT;
1871                 }
1872                 break;
1873         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1874         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1875                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1876                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1877                 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1878                         authkeylen, cdesc->cd_cur_ptr + state1_size,
1879                         &state2_size, cdesc->aes_cmac)) {
1880                         QAT_LOG(ERR, "(GCM)precompute failed");
1881                         return -EFAULT;
1882                 }
1883                 /*
1884                  * Write (the length of AAD) into bytes 16-19 of state2
1885                  * in big-endian format. This field is 8 bytes
1886                  */
1887                 auth_param->u2.aad_sz =
1888                                 RTE_ALIGN_CEIL(aad_length, 16);
1889                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1890
1891                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1892                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1893                                         ICP_QAT_HW_GALOIS_H_SZ);
1894                 *aad_len = rte_bswap32(aad_length);
1895                 cdesc->aad_len = aad_length;
1896                 break;
1897         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1898                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1899                 state1_size = qat_hash_get_state1_size(
1900                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1901                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1902                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1903
1904                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1905                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
1906                 cipherconfig->cipher_config.val =
1907                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1908                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1909                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
1910                         ICP_QAT_HW_CIPHER_ENCRYPT);
1911                 memcpy(cipherconfig->key, authkey, authkeylen);
1912                 memset(cipherconfig->key + authkeylen,
1913                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1914                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1915                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1916                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1917                 break;
1918         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1919                 hash->auth_config.config =
1920                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1921                                 cdesc->qat_hash_alg, digestsize);
1922                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1923                 state1_size = qat_hash_get_state1_size(
1924                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1925                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1926                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1927                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1928
1929                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1930                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1931                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1932                 cdesc->min_qat_dev_gen = QAT_GEN2;
1933
1934                 break;
1935         case ICP_QAT_HW_AUTH_ALGO_MD5:
1936                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
1937                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1938                         cdesc->aes_cmac)) {
1939                         QAT_LOG(ERR, "(MD5)precompute failed");
1940                         return -EFAULT;
1941                 }
1942                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
1943                 break;
1944         case ICP_QAT_HW_AUTH_ALGO_NULL:
1945                 state1_size = qat_hash_get_state1_size(
1946                                 ICP_QAT_HW_AUTH_ALGO_NULL);
1947                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
1948                 break;
1949         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1950                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1951                 state1_size = qat_hash_get_state1_size(
1952                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
1953                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
1954                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
1955
1956                 if (aad_length > 0) {
1957                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
1958                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
1959                         auth_param->u2.aad_sz =
1960                         RTE_ALIGN_CEIL(aad_length,
1961                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
1962                 } else {
1963                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
1964                 }
1965                 cdesc->aad_len = aad_length;
1966                 hash->auth_counter.counter = 0;
1967
1968                 hash_cd_ctrl->outer_prefix_sz = digestsize;
1969                 auth_param->hash_state_sz = digestsize;
1970
1971                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1972                 break;
1973         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1974                 state1_size = qat_hash_get_state1_size(
1975                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
1976                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
1977                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1978                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
1979                                                         + authkeylen);
1980                 /*
1981                 * The Inner Hash Initial State2 block must contain IK
1982                 * (Initialisation Key), followed by IK XOR-ed with KM
1983                 * (Key Modifier): IK||(IK^KM).
1984                 */
1985                 /* write the auth key */
1986                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1987                 /* initialise temp key with auth key */
1988                 memcpy(pTempKey, authkey, authkeylen);
1989                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
1990                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
1991                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
1992                 break;
1993         default:
1994                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
1995                 return -EFAULT;
1996         }
1997
1998         /* Request template setup */
1999         qat_sym_session_init_common_hdr(header, qat_proto_flag);
2000         header->service_cmd_id = cdesc->qat_cmd;
2001
2002         /* Auth CD config setup */
2003         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2004         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2005         hash_cd_ctrl->inner_res_sz = digestsize;
2006         hash_cd_ctrl->final_sz = digestsize;
2007         hash_cd_ctrl->inner_state1_sz = state1_size;
2008         auth_param->auth_res_sz = digestsize;
2009
2010         hash_cd_ctrl->inner_state2_sz  = state2_size;
2011         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2012                         ((sizeof(struct icp_qat_hw_auth_setup) +
2013                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2014                                         >> 3);
2015
2016         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2017         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2018
2019         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2020         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2021
2022         return 0;
2023 }
2024
2025 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2026 {
2027         switch (key_len) {
2028         case ICP_QAT_HW_AES_128_KEY_SZ:
2029                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2030                 break;
2031         case ICP_QAT_HW_AES_192_KEY_SZ:
2032                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2033                 break;
2034         case ICP_QAT_HW_AES_256_KEY_SZ:
2035                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2036                 break;
2037         default:
2038                 return -EINVAL;
2039         }
2040         return 0;
2041 }
2042
2043 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2044                 enum icp_qat_hw_cipher_algo *alg)
2045 {
2046         switch (key_len) {
2047         case ICP_QAT_HW_AES_128_KEY_SZ:
2048                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2049                 break;
2050         case ICP_QAT_HW_AES_256_KEY_SZ:
2051                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2052                 break;
2053         default:
2054                 return -EINVAL;
2055         }
2056         return 0;
2057 }
2058
2059 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2060 {
2061         switch (key_len) {
2062         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2063                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2064                 break;
2065         default:
2066                 return -EINVAL;
2067         }
2068         return 0;
2069 }
2070
2071 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2072 {
2073         switch (key_len) {
2074         case ICP_QAT_HW_KASUMI_KEY_SZ:
2075                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2076                 break;
2077         default:
2078                 return -EINVAL;
2079         }
2080         return 0;
2081 }
2082
2083 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2084 {
2085         switch (key_len) {
2086         case ICP_QAT_HW_DES_KEY_SZ:
2087                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2088                 break;
2089         default:
2090                 return -EINVAL;
2091         }
2092         return 0;
2093 }
2094
2095 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2096 {
2097         switch (key_len) {
2098         case QAT_3DES_KEY_SZ_OPT1:
2099         case QAT_3DES_KEY_SZ_OPT2:
2100         case QAT_3DES_KEY_SZ_OPT3:
2101                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2102                 break;
2103         default:
2104                 return -EINVAL;
2105         }
2106         return 0;
2107 }
2108
2109 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2110 {
2111         switch (key_len) {
2112         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2113                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2114                 break;
2115         default:
2116                 return -EINVAL;
2117         }
2118         return 0;
2119 }
2120
2121 #ifdef RTE_LIB_SECURITY
2122 static int
2123 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2124 {
2125         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2126         struct rte_security_docsis_xform *docsis = &conf->docsis;
2127
2128         /* CRC generate -> Cipher encrypt */
2129         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2130
2131                 if (crypto_sym != NULL &&
2132                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2133                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2134                     crypto_sym->cipher.algo ==
2135                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2136                     (crypto_sym->cipher.key.length ==
2137                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2138                      crypto_sym->cipher.key.length ==
2139                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2140                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2141                     crypto_sym->next == NULL) {
2142                         return 0;
2143                 }
2144         /* Cipher decrypt -> CRC verify */
2145         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2146
2147                 if (crypto_sym != NULL &&
2148                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2149                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2150                     crypto_sym->cipher.algo ==
2151                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2152                     (crypto_sym->cipher.key.length ==
2153                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2154                      crypto_sym->cipher.key.length ==
2155                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2156                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2157                     crypto_sym->next == NULL) {
2158                         return 0;
2159                 }
2160         }
2161
2162         return -EINVAL;
2163 }
2164
2165 static int
2166 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2167                 struct rte_security_session_conf *conf, void *session_private)
2168 {
2169         int ret;
2170         int qat_cmd_id;
2171         struct rte_crypto_sym_xform *xform = NULL;
2172         struct qat_sym_session *session = session_private;
2173
2174         /* Clear the session */
2175         memset(session, 0, qat_sym_session_get_private_size(dev));
2176
2177         ret = qat_sec_session_check_docsis(conf);
2178         if (ret) {
2179                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2180                 return ret;
2181         }
2182
2183         xform = conf->crypto_xform;
2184
2185         /* Verify the session physical address is known */
2186         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2187         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2188                 QAT_LOG(ERR,
2189                         "Session physical address unknown. Bad memory pool.");
2190                 return -EINVAL;
2191         }
2192
2193         /* Set context descriptor physical address */
2194         session->cd_paddr = session_paddr +
2195                         offsetof(struct qat_sym_session, cd);
2196
2197         session->min_qat_dev_gen = QAT_GEN1;
2198
2199         /* Get requested QAT command id - should be cipher */
2200         qat_cmd_id = qat_get_cmd_id(xform);
2201         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2202                 QAT_LOG(ERR, "Unsupported xform chain requested");
2203                 return -ENOTSUP;
2204         }
2205         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2206
2207         ret = qat_sym_session_configure_cipher(dev, xform, session);
2208         if (ret < 0)
2209                 return ret;
2210
2211         return 0;
2212 }
2213
2214 int
2215 qat_security_session_create(void *dev,
2216                                 struct rte_security_session_conf *conf,
2217                                 struct rte_security_session *sess,
2218                                 struct rte_mempool *mempool)
2219 {
2220         void *sess_private_data;
2221         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2222         int ret;
2223
2224         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2225                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2226                 QAT_LOG(ERR, "Invalid security protocol");
2227                 return -EINVAL;
2228         }
2229
2230         if (rte_mempool_get(mempool, &sess_private_data)) {
2231                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2232                 return -ENOMEM;
2233         }
2234
2235         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2236                         sess_private_data);
2237         if (ret != 0) {
2238                 QAT_LOG(ERR, "Failed to configure session parameters");
2239                 /* Return session to mempool */
2240                 rte_mempool_put(mempool, sess_private_data);
2241                 return ret;
2242         }
2243
2244         set_sec_session_private_data(sess, sess_private_data);
2245
2246         return ret;
2247 }
2248
2249 int
2250 qat_security_session_destroy(void *dev __rte_unused,
2251                                  struct rte_security_session *sess)
2252 {
2253         void *sess_priv = get_sec_session_private_data(sess);
2254         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2255
2256         if (sess_priv) {
2257                 if (s->bpi_ctx)
2258                         bpi_cipher_ctx_free(s->bpi_ctx);
2259                 memset(s, 0, qat_sym_session_get_private_size(dev));
2260                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2261
2262                 set_sec_session_private_data(sess, NULL);
2263                 rte_mempool_put(sess_mp, sess_priv);
2264         }
2265         return 0;
2266 }
2267 #endif