crypto/qat: QAT symmetric crypto session configuration
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2019 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
24
/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2.
 * H0..H4 serialized in big-endian byte order (0x67452301, ...); used as
 * the starting digest state for HMAC inner/outer pre-computes.
 */
static const uint8_t sha1InitialState[] = {
        0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
        0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};
29
/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2.
 * Eight 32-bit words H0..H7 in big-endian byte order.
 */
static const uint8_t sha224InitialState[] = {
        0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
        0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
        0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};
35
/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2.
 * Eight 32-bit words H0..H7 in big-endian byte order (0x6a09e667, ...).
 */
static const uint8_t sha256InitialState[] = {
        0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
        0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
        0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};
41
/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2.
 * Eight 64-bit words H0..H7 in big-endian byte order.
 */
static const uint8_t sha384InitialState[] = {
        0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
        0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
        0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
        0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
        0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
        0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};
50
/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2.
 * Eight 64-bit words H0..H7 in big-endian byte order (0x6a09e667f3bcc908, ...).
 */
static const uint8_t sha512InitialState[] = {
        0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
        0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
        0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
        0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
        0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
        0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
59
60 /** Frees a context previously created
61  *  Depends on openssl libcrypto
62  */
63 static void
64 bpi_cipher_ctx_free(void *bpi_ctx)
65 {
66         if (bpi_ctx != NULL)
67                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
68 }
69
70 /** Creates a context in either AES or DES in ECB mode
71  *  Depends on openssl libcrypto
72  */
73 static int
74 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
75                 enum rte_crypto_cipher_operation direction __rte_unused,
76                 const uint8_t *key, uint16_t key_length, void **ctx)
77 {
78         const EVP_CIPHER *algo = NULL;
79         int ret;
80         *ctx = EVP_CIPHER_CTX_new();
81
82         if (*ctx == NULL) {
83                 ret = -ENOMEM;
84                 goto ctx_init_err;
85         }
86
87         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
88                 algo = EVP_des_ecb();
89         else
90                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
91                         algo = EVP_aes_128_ecb();
92                 else
93                         algo = EVP_aes_256_ecb();
94
95         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
96         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
97                 ret = -EINVAL;
98                 goto ctx_init_err;
99         }
100
101         return 0;
102
103 ctx_init_err:
104         if (*ctx != NULL)
105                 EVP_CIPHER_CTX_free(*ctx);
106         return ret;
107 }
108
109 static int
110 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
111                 struct qat_sym_dev_private *internals)
112 {
113         int i = 0;
114         const struct rte_cryptodev_capabilities *capability;
115
116         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
117                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
118                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
119                         continue;
120
121                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
122                         continue;
123
124                 if (capability->sym.cipher.algo == algo)
125                         return 1;
126         }
127         return 0;
128 }
129
130 static int
131 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
132                 struct qat_sym_dev_private *internals)
133 {
134         int i = 0;
135         const struct rte_cryptodev_capabilities *capability;
136
137         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
138                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
139                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
140                         continue;
141
142                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
143                         continue;
144
145                 if (capability->sym.auth.algo == algo)
146                         return 1;
147         }
148         return 0;
149 }
150
151 void
152 qat_sym_session_clear(struct rte_cryptodev *dev,
153                 struct rte_cryptodev_sym_session *sess)
154 {
155         uint8_t index = dev->driver_id;
156         void *sess_priv = get_sym_session_private_data(sess, index);
157         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
158
159         if (sess_priv) {
160                 if (s->bpi_ctx)
161                         bpi_cipher_ctx_free(s->bpi_ctx);
162                 memset(s, 0, qat_sym_session_get_private_size(dev));
163                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
164
165                 set_sym_session_private_data(sess, index, NULL);
166                 rte_mempool_put(sess_mp, sess_priv);
167         }
168 }
169
170 static int
171 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
172 {
173         /* Cipher Only */
174         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
175                 return ICP_QAT_FW_LA_CMD_CIPHER;
176
177         /* Authentication Only */
178         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
179                 return ICP_QAT_FW_LA_CMD_AUTH;
180
181         /* AEAD */
182         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
183                 /* AES-GCM and AES-CCM works with different direction
184                  * GCM first encrypts and generate hash where AES-CCM
185                  * first generate hash and encrypts. Similar relation
186                  * applies to decryption.
187                  */
188                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
189                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
190                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
191                         else
192                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
193                 else
194                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
195                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
196                         else
197                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
198         }
199
200         if (xform->next == NULL)
201                 return -1;
202
203         /* Cipher then Authenticate */
204         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
205                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
206                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
207
208         /* Authenticate then Cipher */
209         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
210                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
211                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
212
213         return -1;
214 }
215
216 static struct rte_crypto_auth_xform *
217 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
218 {
219         do {
220                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
221                         return &xform->auth;
222
223                 xform = xform->next;
224         } while (xform);
225
226         return NULL;
227 }
228
229 static struct rte_crypto_cipher_xform *
230 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
231 {
232         do {
233                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
234                         return &xform->cipher;
235
236                 xform = xform->next;
237         } while (xform);
238
239         return NULL;
240 }
241
/** Configure the cipher part of a QAT session from a transform chain.
 *
 *  Validates the key size for the selected algorithm, records the IV
 *  offset/length, picks the QAT hardware cipher mode, sets the direction,
 *  and builds the cipher content descriptor. For DOCSIS BPI algorithms an
 *  OpenSSL ECB context is additionally created for runt-block processing.
 *
 *  @return 0 on success; -EINVAL on bad key size/unknown algorithm,
 *          -ENOTSUP on unsupported algorithm. On failure any allocated
 *          BPI context is freed.
 */
int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform,
                struct qat_sym_session *session)
{
        struct qat_sym_dev_private *internals = dev->data->dev_private;
        struct rte_crypto_cipher_xform *cipher_xform = NULL;
        int ret;

        /* Get cipher xform from crypto xform chain */
        cipher_xform = qat_get_cipher_xform(xform);

        session->cipher_iv.offset = cipher_xform->iv.offset;
        session->cipher_iv.length = cipher_xform->iv.length;

        switch (cipher_xform->algo) {
        case RTE_CRYPTO_CIPHER_AES_CBC:
                if (qat_sym_validate_aes_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid AES cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                if (qat_sym_validate_aes_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid AES cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
                break;
        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
                                        &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                /* SNOW 3G keystream generation runs the core in ECB mode */
                session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
                break;
        case RTE_CRYPTO_CIPHER_NULL:
                session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
                break;
        case RTE_CRYPTO_CIPHER_KASUMI_F8:
                if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
                                        &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid KASUMI cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                if (qat_sym_validate_3des_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid 3DES cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
                break;
        case RTE_CRYPTO_CIPHER_DES_CBC:
                if (qat_sym_validate_des_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid DES cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CTR:
                if (qat_sym_validate_3des_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid 3DES cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
                break;
        case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
                /* DOCSIS BPI needs an OpenSSL ECB context for runt blocks */
                ret = bpi_cipher_ctx_init(
                                        cipher_xform->algo,
                                        cipher_xform->op,
                                        cipher_xform->key.data,
                                        cipher_xform->key.length,
                                        &session->bpi_ctx);
                if (ret != 0) {
                        QAT_LOG(ERR, "failed to create DES BPI ctx");
                        goto error_out;
                }
                if (qat_sym_validate_des_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid DES cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
                break;
        case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
                /* DOCSIS BPI needs an OpenSSL ECB context for runt blocks */
                ret = bpi_cipher_ctx_init(
                                        cipher_xform->algo,
                                        cipher_xform->op,
                                        cipher_xform->key.data,
                                        cipher_xform->key.length,
                                        &session->bpi_ctx);
                if (ret != 0) {
                        QAT_LOG(ERR, "failed to create AES BPI ctx");
                        goto error_out;
                }
                if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
                break;
        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                /* ZUC is not present on every QAT generation; check caps */
                if (!qat_is_cipher_alg_supported(
                        cipher_xform->algo, internals)) {
                        QAT_LOG(ERR, "%s not supported on this device",
                                rte_crypto_cipher_algorithm_strings
                                        [cipher_xform->algo]);
                        ret = -ENOTSUP;
                        goto error_out;
                }
                if (qat_sym_validate_zuc_key(cipher_xform->key.length,
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid ZUC cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
                break;
        case RTE_CRYPTO_CIPHER_AES_XTS:
                /* XTS key is two concatenated AES keys; validate each half */
                if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
                        QAT_LOG(ERR, "AES-XTS-192 not supported");
                        ret = -EINVAL;
                        goto error_out;
                }
                if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
                                &session->qat_cipher_alg) != 0) {
                        QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
                        ret = -EINVAL;
                        goto error_out;
                }
                session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
                break;
        case RTE_CRYPTO_CIPHER_3DES_ECB:
        case RTE_CRYPTO_CIPHER_AES_ECB:
        case RTE_CRYPTO_CIPHER_AES_F8:
        case RTE_CRYPTO_CIPHER_ARC4:
                /* Known algorithms that this PMD does not implement */
                QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
                                cipher_xform->algo);
                ret = -ENOTSUP;
                goto error_out;
        default:
                /* NOTE(review): trailing "\n" is inconsistent with the other
                 * QAT_LOG calls in this function - confirm whether QAT_LOG
                 * already appends a newline.
                 */
                QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
                                cipher_xform->algo);
                ret = -EINVAL;
                goto error_out;
        }

        if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
                session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
        else
                session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

        /* Build the cipher content descriptor from the validated key */
        if (qat_sym_session_aead_create_cd_cipher(session,
                                                cipher_xform->key.data,
                                                cipher_xform->key.length)) {
                ret = -EINVAL;
                goto error_out;
        }

        return 0;

error_out:
        /* Free any BPI context allocated above to avoid leaking it */
        if (session->bpi_ctx) {
                bpi_cipher_ctx_free(session->bpi_ctx);
                session->bpi_ctx = NULL;
        }
        return ret;
}
430
431 int
432 qat_sym_session_configure(struct rte_cryptodev *dev,
433                 struct rte_crypto_sym_xform *xform,
434                 struct rte_cryptodev_sym_session *sess,
435                 struct rte_mempool *mempool)
436 {
437         void *sess_private_data;
438         int ret;
439
440         if (rte_mempool_get(mempool, &sess_private_data)) {
441                 CDEV_LOG_ERR(
442                         "Couldn't get object from session mempool");
443                 return -ENOMEM;
444         }
445
446         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
447         if (ret != 0) {
448                 QAT_LOG(ERR,
449                     "Crypto QAT PMD: failed to configure session parameters");
450
451                 /* Return session to mempool */
452                 rte_mempool_put(mempool, sess_private_data);
453                 return ret;
454         }
455
456         set_sym_session_private_data(sess, dev->driver_id,
457                 sess_private_data);
458
459         return 0;
460 }
461
/** Enable the extended-protocol hash flags in the firmware request header
 *  for mixed (wireless hash + non-matching cipher) sessions.
 *
 *  @param session    session whose cached firmware request is patched
 *  @param hash_flag  hash flag bit(s) to OR into content-descriptor LW 28
 *                    (may be 0 to only set the extended/proto flags)
 */
static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
                uint8_t hash_flag)
{
        struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
        struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
                        (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
                        session->fw_req.cd_ctrl.content_desc_ctrl_lw;

        /* Set the Use Extended Protocol Flags bit in LW 1 */
        QAT_FIELD_SET(header->comn_req_flags,
                        QAT_COMN_EXT_FLAGS_USED,
                        QAT_COMN_EXT_FLAGS_BITPOS,
                        QAT_COMN_EXT_FLAGS_MASK);

        /* Set Hash Flags in LW 28 */
        cd_ctrl->hash_flags |= hash_flag;

        /* Set proto flags in LW 1: the protocol field must match the
         * *cipher* algorithm; the ZUC flag is mutually exclusive with it.
         */
        switch (session->qat_cipher_alg) {
        case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_SNOW_3G_PROTO);
                ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
                                header->serv_specif_flags, 0);
                break;
        case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
                ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
                                header->serv_specif_flags,
                                ICP_QAT_FW_LA_ZUC_3G_PROTO);
                break;
        default:
                /* Any other cipher: clear both protocol indications */
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
                ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
                                header->serv_specif_flags, 0);
                break;
        }
}
503
/** Detect "mixed" hash+cipher combinations (a wireless hash paired with a
 *  different-family cipher, or AES-CMAC/NULL hash with a wireless cipher)
 *  and set the extended hash flags the firmware needs for them.
 *
 *  Devices advertising QAT_SYM_CAP_MIXED_CRYPTO can do this from GEN2;
 *  otherwise GEN3 is required, recorded in session->min_qat_dev_gen.
 */
static void
qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
                struct qat_sym_session *session)
{
        const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
        enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
                        QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;

        if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
                        session->qat_cipher_alg !=
                        ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
                /* ZUC hash with a non-ZUC cipher */
                session->min_qat_dev_gen = min_dev_gen;
                qat_sym_session_set_ext_hash_flags(session,
                        1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
        } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
                        session->qat_cipher_alg !=
                        ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
                /* SNOW 3G hash with a non-SNOW 3G cipher */
                session->min_qat_dev_gen = min_dev_gen;
                qat_sym_session_set_ext_hash_flags(session,
                        1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
        } else if ((session->aes_cmac ||
                        session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
                        (session->qat_cipher_alg ==
                        ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
                        session->qat_cipher_alg ==
                        ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
                /* AES-CMAC or NULL hash with a wireless cipher: extended
                 * flags needed but no extra hash flag bit.
                 */
                session->min_qat_dev_gen = min_dev_gen;
                qat_sym_session_set_ext_hash_flags(session, 0);
        }
}
534
535 int
536 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
537                 struct rte_crypto_sym_xform *xform, void *session_private)
538 {
539         struct qat_sym_session *session = session_private;
540         struct qat_sym_dev_private *internals = dev->data->dev_private;
541         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
542         int ret;
543         int qat_cmd_id;
544
545         /* Verify the session physical address is known */
546         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
547         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
548                 QAT_LOG(ERR,
549                         "Session physical address unknown. Bad memory pool.");
550                 return -EINVAL;
551         }
552
553         /* Set context descriptor physical address */
554         session->cd_paddr = session_paddr +
555                         offsetof(struct qat_sym_session, cd);
556
557         session->min_qat_dev_gen = QAT_GEN1;
558
559         /* Get requested QAT command id */
560         qat_cmd_id = qat_get_cmd_id(xform);
561         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
562                 QAT_LOG(ERR, "Unsupported xform chain requested");
563                 return -ENOTSUP;
564         }
565         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
566         switch (session->qat_cmd) {
567         case ICP_QAT_FW_LA_CMD_CIPHER:
568                 ret = qat_sym_session_configure_cipher(dev, xform, session);
569                 if (ret < 0)
570                         return ret;
571                 break;
572         case ICP_QAT_FW_LA_CMD_AUTH:
573                 ret = qat_sym_session_configure_auth(dev, xform, session);
574                 if (ret < 0)
575                         return ret;
576                 session->is_single_pass_gmac =
577                                qat_dev_gen == QAT_GEN3 &&
578                                xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
579                                xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
580                 break;
581         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
582                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
583                         ret = qat_sym_session_configure_aead(dev, xform,
584                                         session);
585                         if (ret < 0)
586                                 return ret;
587                 } else {
588                         ret = qat_sym_session_configure_cipher(dev,
589                                         xform, session);
590                         if (ret < 0)
591                                 return ret;
592                         ret = qat_sym_session_configure_auth(dev,
593                                         xform, session);
594                         if (ret < 0)
595                                 return ret;
596                         /* Special handling of mixed hash+cipher algorithms */
597                         qat_sym_session_handle_mixed(dev, session);
598                 }
599                 break;
600         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
601                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
602                         ret = qat_sym_session_configure_aead(dev, xform,
603                                         session);
604                         if (ret < 0)
605                                 return ret;
606                 } else {
607                         ret = qat_sym_session_configure_auth(dev,
608                                         xform, session);
609                         if (ret < 0)
610                                 return ret;
611                         ret = qat_sym_session_configure_cipher(dev,
612                                         xform, session);
613                         if (ret < 0)
614                                 return ret;
615                         /* Special handling of mixed hash+cipher algorithms */
616                         qat_sym_session_handle_mixed(dev, session);
617                 }
618                 break;
619         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
620         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
621         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
622         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
623         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
624         case ICP_QAT_FW_LA_CMD_MGF1:
625         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
626         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
627         case ICP_QAT_FW_LA_CMD_DELIMITER:
628         QAT_LOG(ERR, "Unsupported Service %u",
629                 session->qat_cmd);
630                 return -ENOTSUP;
631         default:
632         QAT_LOG(ERR, "Unsupported Service %u",
633                 session->qat_cmd);
634                 return -ENOTSUP;
635         }
636
637         return 0;
638 }
639
/** Convert an AEAD session into a GEN3 single-pass (SPC) cipher request.
 *
 *  Rewrites the cached firmware request so the AEAD operation runs as a
 *  single CIPHER command: AES-GCM uses the hardware AEAD mode (12-octet IV
 *  flag set); the other algorithm handled here uses CTR mode
 *  (Chacha-Poly per the inline comment). AAD/digest sizes are written into
 *  both the cipher config and the service-specific request parameters.
 *
 *  @return 0 on success, -EINVAL if building the cipher content
 *          descriptor fails
 */
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
                struct rte_crypto_aead_xform *aead_xform)
{
        /* Service-specific request params area, viewed as cipher params */
        struct icp_qat_fw_la_cipher_req_params *cipher_param =
                        (void *) &session->fw_req.serv_specif_rqpars;

        session->is_single_pass = 1;
        session->min_qat_dev_gen = QAT_GEN3;
        session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
        if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
                session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                        session->fw_req.comn_hdr.serv_specif_flags,
                        ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
        } else {
                /* Chacha-Poly is special case that use QAT CTR mode */
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
        }
        session->cipher_iv.offset = aead_xform->iv.offset;
        session->cipher_iv.length = aead_xform->iv.length;
        if (qat_sym_session_aead_create_cd_cipher(session,
                        aead_xform->key.data, aead_xform->key.length))
                return -EINVAL;
        session->aad_len = aead_xform->aad_length;
        session->digest_length = aead_xform->digest_length;
        if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
                /* Encrypt: generate and return the auth tag */
                session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
                session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
                ICP_QAT_FW_LA_RET_AUTH_SET(
                        session->fw_req.comn_hdr.serv_specif_flags,
                        ICP_QAT_FW_LA_RET_AUTH_RES);
        } else {
                /* Decrypt: compare against the supplied auth tag */
                session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
                session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
                ICP_QAT_FW_LA_CMP_AUTH_SET(
                        session->fw_req.comn_hdr.serv_specif_flags,
                        ICP_QAT_FW_LA_CMP_AUTH_RES);
        }
        ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
                        session->fw_req.comn_hdr.serv_specif_flags,
                        ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
        ICP_QAT_FW_LA_PROTO_SET(
                        session->fw_req.comn_hdr.serv_specif_flags,
                        ICP_QAT_FW_LA_NO_PROTO);
        session->fw_req.comn_hdr.service_cmd_id =
                        ICP_QAT_FW_LA_CMD_CIPHER;
        session->cd.cipher.cipher_config.val =
                        ICP_QAT_HW_CIPHER_CONFIG_BUILD(
                                ICP_QAT_HW_CIPHER_AEAD_MODE,
                                session->qat_cipher_alg,
                                ICP_QAT_HW_CIPHER_NO_CONVERT,
                                session->qat_dir);
        /* Digest length goes into the AEAD hash-compare-length field */
        QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
                        aead_xform->digest_length,
                        QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
                        QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
        /* AAD length lives in the upper config word */
        session->cd.cipher.cipher_config.reserved =
                        ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
                                aead_xform->aad_length);
        cipher_param->spc_aad_sz = aead_xform->aad_length;
        cipher_param->spc_auth_res_sz = aead_xform->digest_length;

        return 0;
}
705
/*
 * Configure a QAT session for an authentication-only transform.
 *
 * Maps the rte_crypto auth algorithm onto the QAT HW hash algorithm and
 * auth mode, then builds the content descriptor(s). AES-GMAC is special:
 * it is implemented on top of the GCM cipher path, so both a cipher and
 * an auth descriptor are created (in an order that depends on the
 * generate/verify direction) before the command id is restored to plain
 * AUTH.
 *
 * Returns 0 on success, -EINVAL on invalid parameters or descriptor
 * creation failure, -ENOTSUP when the algorithm is not supported on
 * this device.
 */
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	/*
	 * NOTE(review): key.length is narrowed to uint8_t here; an auth key
	 * longer than 255 bytes would be silently truncated - confirm no
	 * supported algorithm can be given such a key.
	 */
	uint8_t key_length = auth_xform->key.length;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	/* Default to MODE1; plain (unkeyed) hashes override to MODE0 below */
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		/* CMAC reuses the XCBC-MAC HW algo; aes_cmac flags the
		 * different key-derivation in the precompute path.
		 */
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/* IV length 0 means the default GCM J0 (12-byte) IV */
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		/* ZUC is capability-gated per device generation */
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		/* Known algorithms, but not offloadable by this PMD */
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */

			if (qat_sym_session_aead_create_cd_auth(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_sym_session_aead_create_cd_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}
865
/*
 * Configure a QAT session for an AEAD transform (AES-GCM, AES-CCM or
 * CHACHA20-POLY1305).
 *
 * GCM on GEN3+ devices with the 12-byte SPC IV, and CHACHA20-POLY1305
 * always, take the single-pass path and return early from
 * qat_sym_session_handle_single_pass(). Otherwise the cipher and auth
 * content descriptors are created in an order that depends on the
 * encrypt/decrypt direction and algorithm.
 *
 * Returns 0 on success, -EINVAL on invalid key sizes, unknown algorithm
 * or descriptor creation failure.
 */
int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;
	struct qat_sym_dev_private *internals =
			dev->data->dev_private;
	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	session->is_single_pass = 0;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/* GEN3+ with the SPC-sized IV can use single-pass GCM */
		if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
				QAT_AES_GCM_SPC_IV_SIZE) {
			return qat_sym_session_handle_single_pass(session,
					aead_xform);
		}
		/* IV length 0 means the default GCM J0 (12-byte) IV */
		if (session->cipher_iv.length == 0)
			session->cipher_iv.length = AES_GCM_J0_LEN;

		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		break;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
			return -EINVAL;
		session->qat_cipher_alg =
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
		/* Chacha20-Poly1305 is single-pass only */
		return qat_sym_session_handle_single_pass(session,
						aead_xform);
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	/*
	 * GCM-encrypt and CCM-decrypt both run the cipher before the hash;
	 * the other two combinations hash first.
	 */
	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * It needs to create cipher desc content first,
		 * then authentication
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * It needs to create authentication desc content first,
		 * then cipher
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	session->digest_length = aead_xform->digest_length;
	return 0;
}
979
980 unsigned int qat_sym_session_get_private_size(
981                 struct rte_cryptodev *dev __rte_unused)
982 {
983         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
984 }
985
986 /* returns block size in bytes per cipher algo */
987 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
988 {
989         switch (qat_cipher_alg) {
990         case ICP_QAT_HW_CIPHER_ALGO_DES:
991                 return ICP_QAT_HW_DES_BLK_SZ;
992         case ICP_QAT_HW_CIPHER_ALGO_3DES:
993                 return ICP_QAT_HW_3DES_BLK_SZ;
994         case ICP_QAT_HW_CIPHER_ALGO_AES128:
995         case ICP_QAT_HW_CIPHER_ALGO_AES192:
996         case ICP_QAT_HW_CIPHER_ALGO_AES256:
997                 return ICP_QAT_HW_AES_BLK_SZ;
998         default:
999                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1000                 return -EFAULT;
1001         };
1002         return -EFAULT;
1003 }
1004
1005 /*
1006  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1007  * This is digest size rounded up to nearest quadword
1008  */
1009 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1010 {
1011         switch (qat_hash_alg) {
1012         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1013                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1014                                                 QAT_HW_DEFAULT_ALIGNMENT);
1015         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1016                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1017                                                 QAT_HW_DEFAULT_ALIGNMENT);
1018         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1019                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1020                                                 QAT_HW_DEFAULT_ALIGNMENT);
1021         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1022                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1023                                                 QAT_HW_DEFAULT_ALIGNMENT);
1024         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1025                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1026                                                 QAT_HW_DEFAULT_ALIGNMENT);
1027         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1028                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1029                                                 QAT_HW_DEFAULT_ALIGNMENT);
1030         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1031         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1032                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1033                                                 QAT_HW_DEFAULT_ALIGNMENT);
1034         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1035                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1036                                                 QAT_HW_DEFAULT_ALIGNMENT);
1037         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1038                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1039                                                 QAT_HW_DEFAULT_ALIGNMENT);
1040         case ICP_QAT_HW_AUTH_ALGO_MD5:
1041                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1042                                                 QAT_HW_DEFAULT_ALIGNMENT);
1043         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1044                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1045                                                 QAT_HW_DEFAULT_ALIGNMENT);
1046         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1047                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1048                                                 QAT_HW_DEFAULT_ALIGNMENT);
1049         case ICP_QAT_HW_AUTH_ALGO_NULL:
1050                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1051                                                 QAT_HW_DEFAULT_ALIGNMENT);
1052         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1053                 /* return maximum state1 size in this case */
1054                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1055                                                 QAT_HW_DEFAULT_ALIGNMENT);
1056         default:
1057                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1058                 return -EFAULT;
1059         };
1060         return -EFAULT;
1061 }
1062
1063 /* returns digest size in bytes  per hash algo */
1064 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1065 {
1066         switch (qat_hash_alg) {
1067         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1068                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1069         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1070                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1071         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1072                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1073         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1074                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1075         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1076                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1077         case ICP_QAT_HW_AUTH_ALGO_MD5:
1078                 return ICP_QAT_HW_MD5_STATE1_SZ;
1079         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1080                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1081         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1082                 /* return maximum digest size in this case */
1083                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1084         default:
1085                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1086                 return -EFAULT;
1087         };
1088         return -EFAULT;
1089 }
1090
1091 /* returns block size in byes per hash algo */
1092 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1093 {
1094         switch (qat_hash_alg) {
1095         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1096                 return SHA_CBLOCK;
1097         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1098                 return SHA256_CBLOCK;
1099         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1100                 return SHA256_CBLOCK;
1101         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1102                 return SHA512_CBLOCK;
1103         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1104                 return SHA512_CBLOCK;
1105         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1106                 return 16;
1107         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1108                 return ICP_QAT_HW_AES_BLK_SZ;
1109         case ICP_QAT_HW_AUTH_ALGO_MD5:
1110                 return MD5_CBLOCK;
1111         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1112                 /* return maximum block size in this case */
1113                 return SHA512_CBLOCK;
1114         default:
1115                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1116                 return -EFAULT;
1117         };
1118         return -EFAULT;
1119 }
1120
1121 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1122 {
1123         SHA_CTX ctx;
1124
1125         if (!SHA1_Init(&ctx))
1126                 return -EFAULT;
1127         SHA1_Transform(&ctx, data_in);
1128         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1129         return 0;
1130 }
1131
1132 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1133 {
1134         SHA256_CTX ctx;
1135
1136         if (!SHA224_Init(&ctx))
1137                 return -EFAULT;
1138         SHA256_Transform(&ctx, data_in);
1139         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1140         return 0;
1141 }
1142
1143 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1144 {
1145         SHA256_CTX ctx;
1146
1147         if (!SHA256_Init(&ctx))
1148                 return -EFAULT;
1149         SHA256_Transform(&ctx, data_in);
1150         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1151         return 0;
1152 }
1153
1154 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1155 {
1156         SHA512_CTX ctx;
1157
1158         if (!SHA384_Init(&ctx))
1159                 return -EFAULT;
1160         SHA512_Transform(&ctx, data_in);
1161         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1162         return 0;
1163 }
1164
1165 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1166 {
1167         SHA512_CTX ctx;
1168
1169         if (!SHA512_Init(&ctx))
1170                 return -EFAULT;
1171         SHA512_Transform(&ctx, data_in);
1172         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1173         return 0;
1174 }
1175
1176 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1177 {
1178         MD5_CTX ctx;
1179
1180         if (!MD5_Init(&ctx))
1181                 return -EFAULT;
1182         MD5_Transform(&ctx, data_in);
1183         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1184
1185         return 0;
1186 }
1187
1188 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1189                         uint8_t *data_in,
1190                         uint8_t *data_out)
1191 {
1192         int digest_size;
1193         uint8_t digest[qat_hash_get_digest_size(
1194                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1195         uint32_t *hash_state_out_be32;
1196         uint64_t *hash_state_out_be64;
1197         int i;
1198
1199         digest_size = qat_hash_get_digest_size(hash_alg);
1200         if (digest_size <= 0)
1201                 return -EFAULT;
1202
1203         hash_state_out_be32 = (uint32_t *)data_out;
1204         hash_state_out_be64 = (uint64_t *)data_out;
1205
1206         switch (hash_alg) {
1207         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1208                 if (partial_hash_sha1(data_in, digest))
1209                         return -EFAULT;
1210                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1211                         *hash_state_out_be32 =
1212                                 rte_bswap32(*(((uint32_t *)digest)+i));
1213                 break;
1214         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1215                 if (partial_hash_sha224(data_in, digest))
1216                         return -EFAULT;
1217                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1218                         *hash_state_out_be32 =
1219                                 rte_bswap32(*(((uint32_t *)digest)+i));
1220                 break;
1221         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1222                 if (partial_hash_sha256(data_in, digest))
1223                         return -EFAULT;
1224                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1225                         *hash_state_out_be32 =
1226                                 rte_bswap32(*(((uint32_t *)digest)+i));
1227                 break;
1228         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1229                 if (partial_hash_sha384(data_in, digest))
1230                         return -EFAULT;
1231                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1232                         *hash_state_out_be64 =
1233                                 rte_bswap64(*(((uint64_t *)digest)+i));
1234                 break;
1235         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1236                 if (partial_hash_sha512(data_in, digest))
1237                         return -EFAULT;
1238                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1239                         *hash_state_out_be64 =
1240                                 rte_bswap64(*(((uint64_t *)digest)+i));
1241                 break;
1242         case ICP_QAT_HW_AUTH_ALGO_MD5:
1243                 if (partial_hash_md5(data_in, data_out))
1244                         return -EFAULT;
1245                 break;
1246         default:
1247                 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1248                 return -EFAULT;
1249         }
1250
1251         return 0;
1252 }
1253 #define HMAC_IPAD_VALUE 0x36
1254 #define HMAC_OPAD_VALUE 0x5c
1255 #define HASH_XCBC_PRECOMP_KEY_NUM 3
1256
1257 static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1258
1259 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1260 {
1261         int i;
1262
1263         derived[0] = base[0] << 1;
1264         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1265                 derived[i] = base[i] << 1;
1266                 derived[i - 1] |= base[i] >> 7;
1267         }
1268
1269         if (base[0] & 0x80)
1270                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1271 }
1272
1273 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1274                                 const uint8_t *auth_key,
1275                                 uint16_t auth_keylen,
1276                                 uint8_t *p_state_buf,
1277                                 uint16_t *p_state_len,
1278                                 uint8_t aes_cmac)
1279 {
1280         int block_size;
1281         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1282         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1283         int i;
1284
1285         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1286
1287                 /* CMAC */
1288                 if (aes_cmac) {
1289                         AES_KEY enc_key;
1290                         uint8_t *in = NULL;
1291                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1292                         uint8_t *k1, *k2;
1293
1294                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1295
1296                         in = rte_zmalloc("AES CMAC K1",
1297                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1298
1299                         if (in == NULL) {
1300                                 QAT_LOG(ERR, "Failed to alloc memory");
1301                                 return -ENOMEM;
1302                         }
1303
1304                         rte_memcpy(in, AES_CMAC_SEED,
1305                                    ICP_QAT_HW_AES_128_KEY_SZ);
1306                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1307
1308                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1309                                 &enc_key) != 0) {
1310                                 rte_free(in);
1311                                 return -EFAULT;
1312                         }
1313
1314                         AES_encrypt(in, k0, &enc_key);
1315
1316                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1317                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1318
1319                         aes_cmac_key_derive(k0, k1);
1320                         aes_cmac_key_derive(k1, k2);
1321
1322                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1323                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1324                         rte_free(in);
1325                         return 0;
1326                 } else {
1327                         static uint8_t qat_aes_xcbc_key_seed[
1328                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1329                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1330                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1331                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1332                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1333                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1334                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1335                         };
1336
1337                         uint8_t *in = NULL;
1338                         uint8_t *out = p_state_buf;
1339                         int x;
1340                         AES_KEY enc_key;
1341
1342                         in = rte_zmalloc("working mem for key",
1343                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1344                         if (in == NULL) {
1345                                 QAT_LOG(ERR, "Failed to alloc memory");
1346                                 return -ENOMEM;
1347                         }
1348
1349                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1350                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1351                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1352                                 if (AES_set_encrypt_key(auth_key,
1353                                                         auth_keylen << 3,
1354                                                         &enc_key) != 0) {
1355                                         rte_free(in -
1356                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1357                                         memset(out -
1358                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1359                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1360                                         return -EFAULT;
1361                                 }
1362                                 AES_encrypt(in, out, &enc_key);
1363                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1364                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1365                         }
1366                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1367                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1368                         return 0;
1369                 }
1370
1371         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1372                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1373                 uint8_t *in = NULL;
1374                 uint8_t *out = p_state_buf;
1375                 AES_KEY enc_key;
1376
1377                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1378                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1379                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1380                 in = rte_zmalloc("working mem for key",
1381                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1382                 if (in == NULL) {
1383                         QAT_LOG(ERR, "Failed to alloc memory");
1384                         return -ENOMEM;
1385                 }
1386
1387                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1388                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1389                         &enc_key) != 0) {
1390                         return -EFAULT;
1391                 }
1392                 AES_encrypt(in, out, &enc_key);
1393                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1394                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1395                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1396                 rte_free(in);
1397                 return 0;
1398         }
1399
1400         block_size = qat_hash_get_block_size(hash_alg);
1401         if (block_size < 0)
1402                 return block_size;
1403         /* init ipad and opad from key and xor with fixed values */
1404         memset(ipad, 0, block_size);
1405         memset(opad, 0, block_size);
1406
1407         if (auth_keylen > (unsigned int)block_size) {
1408                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1409                 return -EFAULT;
1410         }
1411         rte_memcpy(ipad, auth_key, auth_keylen);
1412         rte_memcpy(opad, auth_key, auth_keylen);
1413
1414         for (i = 0; i < block_size; i++) {
1415                 uint8_t *ipad_ptr = ipad + i;
1416                 uint8_t *opad_ptr = opad + i;
1417                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1418                 *opad_ptr ^= HMAC_OPAD_VALUE;
1419         }
1420
1421         /* do partial hash of ipad and copy to state1 */
1422         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1423                 memset(ipad, 0, block_size);
1424                 memset(opad, 0, block_size);
1425                 QAT_LOG(ERR, "ipad precompute failed");
1426                 return -EFAULT;
1427         }
1428
1429         /*
1430          * State len is a multiple of 8, so may be larger than the digest.
1431          * Put the partial hash of opad state_len bytes after state1
1432          */
1433         *p_state_len = qat_hash_get_state1_size(hash_alg);
1434         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1435                 memset(ipad, 0, block_size);
1436                 memset(opad, 0, block_size);
1437                 QAT_LOG(ERR, "opad precompute failed");
1438                 return -EFAULT;
1439         }
1440
1441         /*  don't leave data lying around */
1442         memset(ipad, 0, block_size);
1443         memset(opad, 0, block_size);
1444         return 0;
1445 }
1446
1447 static void
1448 qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
1449                 enum qat_sym_proto_flag proto_flags)
1450 {
1451         header->hdr_flags =
1452                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1453         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1454         header->comn_req_flags =
1455                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1456                                         QAT_COMN_PTR_TYPE_FLAT);
1457         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1458                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1459         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1460                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1461
1462         switch (proto_flags)            {
1463         case QAT_CRYPTO_PROTO_FLAG_NONE:
1464                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1465                                         ICP_QAT_FW_LA_NO_PROTO);
1466                 break;
1467         case QAT_CRYPTO_PROTO_FLAG_CCM:
1468                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1469                                         ICP_QAT_FW_LA_CCM_PROTO);
1470                 break;
1471         case QAT_CRYPTO_PROTO_FLAG_GCM:
1472                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1473                                         ICP_QAT_FW_LA_GCM_PROTO);
1474                 break;
1475         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1476                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1477                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1478                 break;
1479         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1480                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1481                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1482                 break;
1483         }
1484
1485         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1486                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1487         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1488                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1489 }
1490
1491 /*
1492  *      Snow3G and ZUC should never use this function
1493  *      and set its protocol flag in both cipher and auth part of content
1494  *      descriptor building function
1495  */
1496 static enum qat_sym_proto_flag
1497 qat_get_crypto_proto_flag(uint16_t flags)
1498 {
1499         int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
1500         enum qat_sym_proto_flag qat_proto_flag =
1501                         QAT_CRYPTO_PROTO_FLAG_NONE;
1502
1503         switch (proto) {
1504         case ICP_QAT_FW_LA_GCM_PROTO:
1505                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1506                 break;
1507         case ICP_QAT_FW_LA_CCM_PROTO:
1508                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1509                 break;
1510         }
1511
1512         return qat_proto_flag;
1513 }
1514
/*
 * Build the cipher portion of the QAT content descriptor for a session.
 *
 * Writes the cipher config word and the (possibly expanded/padded) key
 * at cdesc->cd_cur_ptr, fills in the cipher cd_ctrl header and the
 * common request header, and advances cd_cur_ptr past what was written.
 *
 * @param cdesc        session descriptor; qat_cmd/qat_mode/qat_dir/
 *                     qat_cipher_alg must already be set by the caller
 * @param cipherkey    raw cipher key from the xform
 * @param cipherkeylen length of cipherkey in bytes
 * @return 0 on success, -EFAULT if qat_cmd is not a cipher command
 */
int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cipher_cd_ctrl and hash_cd_ctrl are two views of the same
	 * cd_ctrl area; which fields are valid depends on qat_cmd */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	enum qat_sym_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* Cipher-only: chain CIPHER slice straight to DRAM write
		 * and disable auth result return/compare */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* Cipher-then-hash: CIPHER -> AUTH -> DRAM write */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* HASH_CIPHER falls through: its chaining and cd_cur_ptr
		 * are set up by the auth CD builder */
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	/* Decide whether firmware must convert (expand) the key.
	 * NOTE(review): the choice depends on mode/alg/direction below;
	 * the hardware key-convert semantics themselves are defined by
	 * the QAT firmware interface, not visible here. */
	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Per-algorithm key/state sizing; cd_ctrl size fields are in
	 * 8-byte (quad-word) units, hence the >> 3 conversions */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		/* preserve a protocol flag (GCM/CCM) already recorded in
		 * the header by the auth CD builder, if any */
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		/* ZUC needs at least a GEN2 device */
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		/* default (AES and friends): key is used as supplied */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	/* offset of the cipher config within the CD, in quad-words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_sym_session_init_common_hdr(header, qat_proto_flag);

	/* Write the cipher config word followed by the key material */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 stores the key twice: the raw key and, right
		 * after it, the key XOR-ed with the F8 modifier */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	/* Pad/expand the key up to the size the hardware expects */
	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	/* total CD size so far, rounded up to quad-words */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
1669
1670 int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
1671                                                 const uint8_t *authkey,
1672                                                 uint32_t authkeylen,
1673                                                 uint32_t aad_length,
1674                                                 uint32_t digestsize,
1675                                                 unsigned int operation)
1676 {
1677         struct icp_qat_hw_auth_setup *hash;
1678         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1679         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1680         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1681         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1682         void *ptr = &req_tmpl->cd_ctrl;
1683         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1684         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1685         struct icp_qat_fw_la_auth_req_params *auth_param =
1686                 (struct icp_qat_fw_la_auth_req_params *)
1687                 ((char *)&req_tmpl->serv_specif_rqpars +
1688                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1689         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1690         uint16_t hash_offset, cd_size;
1691         uint32_t *aad_len = NULL;
1692         uint32_t wordIndex  = 0;
1693         uint32_t *pTempKey;
1694         enum qat_sym_proto_flag qat_proto_flag =
1695                 QAT_CRYPTO_PROTO_FLAG_NONE;
1696
1697         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1698                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1699                                         ICP_QAT_FW_SLICE_AUTH);
1700                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1701                                         ICP_QAT_FW_SLICE_DRAM_WR);
1702                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1703         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1704                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1705                                 ICP_QAT_FW_SLICE_AUTH);
1706                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1707                                 ICP_QAT_FW_SLICE_CIPHER);
1708                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1709                                 ICP_QAT_FW_SLICE_CIPHER);
1710                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1711                                 ICP_QAT_FW_SLICE_DRAM_WR);
1712                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1713         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1714                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1715                 return -EFAULT;
1716         }
1717
1718         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1719                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1720                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1721                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1722                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
1723                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1724         } else {
1725                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1726                                            ICP_QAT_FW_LA_RET_AUTH_RES);
1727                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1728                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1729                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1730         }
1731
1732         /*
1733          * Setup the inner hash config
1734          */
1735         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1736         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1737         hash->auth_config.reserved = 0;
1738         hash->auth_config.config =
1739                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1740                                 cdesc->qat_hash_alg, digestsize);
1741
1742         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1743                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1744                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1745                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1746                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1747                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1748                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1749                         )
1750                 hash->auth_counter.counter = 0;
1751         else {
1752                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1753
1754                 if (block_size < 0)
1755                         return block_size;
1756                 hash->auth_counter.counter = rte_bswap32(block_size);
1757         }
1758
1759         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1760
1761         /*
1762          * cd_cur_ptr now points at the state1 information.
1763          */
1764         switch (cdesc->qat_hash_alg) {
1765         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1766                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1767                         /* Plain SHA-1 */
1768                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1769                                         sizeof(sha1InitialState));
1770                         state1_size = qat_hash_get_state1_size(
1771                                         cdesc->qat_hash_alg);
1772                         break;
1773                 }
1774                 /* SHA-1 HMAC */
1775                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1776                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1777                         cdesc->aes_cmac)) {
1778                         QAT_LOG(ERR, "(SHA)precompute failed");
1779                         return -EFAULT;
1780                 }
1781                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1782                 break;
1783         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1784                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1785                         /* Plain SHA-224 */
1786                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1787                                         sizeof(sha224InitialState));
1788                         state1_size = qat_hash_get_state1_size(
1789                                         cdesc->qat_hash_alg);
1790                         break;
1791                 }
1792                 /* SHA-224 HMAC */
1793                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1794                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1795                         cdesc->aes_cmac)) {
1796                         QAT_LOG(ERR, "(SHA)precompute failed");
1797                         return -EFAULT;
1798                 }
1799                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1800                 break;
1801         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1802                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1803                         /* Plain SHA-256 */
1804                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1805                                         sizeof(sha256InitialState));
1806                         state1_size = qat_hash_get_state1_size(
1807                                         cdesc->qat_hash_alg);
1808                         break;
1809                 }
1810                 /* SHA-256 HMAC */
1811                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1812                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1813                         cdesc->aes_cmac)) {
1814                         QAT_LOG(ERR, "(SHA)precompute failed");
1815                         return -EFAULT;
1816                 }
1817                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1818                 break;
1819         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1820                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1821                         /* Plain SHA-384 */
1822                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1823                                         sizeof(sha384InitialState));
1824                         state1_size = qat_hash_get_state1_size(
1825                                         cdesc->qat_hash_alg);
1826                         break;
1827                 }
1828                 /* SHA-384 HMAC */
1829                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1830                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1831                         cdesc->aes_cmac)) {
1832                         QAT_LOG(ERR, "(SHA)precompute failed");
1833                         return -EFAULT;
1834                 }
1835                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1836                 break;
1837         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1838                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1839                         /* Plain SHA-512 */
1840                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1841                                         sizeof(sha512InitialState));
1842                         state1_size = qat_hash_get_state1_size(
1843                                         cdesc->qat_hash_alg);
1844                         break;
1845                 }
1846                 /* SHA-512 HMAC */
1847                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1848                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1849                         cdesc->aes_cmac)) {
1850                         QAT_LOG(ERR, "(SHA)precompute failed");
1851                         return -EFAULT;
1852                 }
1853                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1854                 break;
1855         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1856                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1857
1858                 if (cdesc->aes_cmac)
1859                         memset(cdesc->cd_cur_ptr, 0, state1_size);
1860                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1861                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1862                         &state2_size, cdesc->aes_cmac)) {
1863                         cdesc->aes_cmac ? QAT_LOG(ERR,
1864                                                   "(CMAC)precompute failed")
1865                                         : QAT_LOG(ERR,
1866                                                   "(XCBC)precompute failed");
1867                         return -EFAULT;
1868                 }
1869                 break;
1870         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1871         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1872                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1873                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1874                 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1875                         authkeylen, cdesc->cd_cur_ptr + state1_size,
1876                         &state2_size, cdesc->aes_cmac)) {
1877                         QAT_LOG(ERR, "(GCM)precompute failed");
1878                         return -EFAULT;
1879                 }
1880                 /*
1881                  * Write (the length of AAD) into bytes 16-19 of state2
1882                  * in big-endian format. This field is 8 bytes
1883                  */
1884                 auth_param->u2.aad_sz =
1885                                 RTE_ALIGN_CEIL(aad_length, 16);
1886                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1887
1888                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1889                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1890                                         ICP_QAT_HW_GALOIS_H_SZ);
1891                 *aad_len = rte_bswap32(aad_length);
1892                 cdesc->aad_len = aad_length;
1893                 break;
1894         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1895                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1896                 state1_size = qat_hash_get_state1_size(
1897                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1898                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1899                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1900
1901                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1902                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
1903                 cipherconfig->cipher_config.val =
1904                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1905                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1906                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
1907                         ICP_QAT_HW_CIPHER_ENCRYPT);
1908                 memcpy(cipherconfig->key, authkey, authkeylen);
1909                 memset(cipherconfig->key + authkeylen,
1910                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1911                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1912                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1913                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1914                 break;
1915         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1916                 hash->auth_config.config =
1917                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1918                                 cdesc->qat_hash_alg, digestsize);
1919                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1920                 state1_size = qat_hash_get_state1_size(
1921                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1922                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1923                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1924                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1925
1926                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1927                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1928                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1929                 cdesc->min_qat_dev_gen = QAT_GEN2;
1930
1931                 break;
1932         case ICP_QAT_HW_AUTH_ALGO_MD5:
1933                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
1934                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1935                         cdesc->aes_cmac)) {
1936                         QAT_LOG(ERR, "(MD5)precompute failed");
1937                         return -EFAULT;
1938                 }
1939                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
1940                 break;
1941         case ICP_QAT_HW_AUTH_ALGO_NULL:
1942                 state1_size = qat_hash_get_state1_size(
1943                                 ICP_QAT_HW_AUTH_ALGO_NULL);
1944                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
1945                 break;
1946         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1947                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1948                 state1_size = qat_hash_get_state1_size(
1949                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
1950                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
1951                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
1952
1953                 if (aad_length > 0) {
1954                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
1955                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
1956                         auth_param->u2.aad_sz =
1957                         RTE_ALIGN_CEIL(aad_length,
1958                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
1959                 } else {
1960                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
1961                 }
1962                 cdesc->aad_len = aad_length;
1963                 hash->auth_counter.counter = 0;
1964
1965                 hash_cd_ctrl->outer_prefix_sz = digestsize;
1966                 auth_param->hash_state_sz = digestsize;
1967
1968                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1969                 break;
1970         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1971                 state1_size = qat_hash_get_state1_size(
1972                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
1973                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
1974                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1975                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
1976                                                         + authkeylen);
1977                 /*
1978                 * The Inner Hash Initial State2 block must contain IK
1979                 * (Initialisation Key), followed by IK XOR-ed with KM
1980                 * (Key Modifier): IK||(IK^KM).
1981                 */
1982                 /* write the auth key */
1983                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1984                 /* initialise temp key with auth key */
1985                 memcpy(pTempKey, authkey, authkeylen);
1986                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
1987                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
1988                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
1989                 break;
1990         default:
1991                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
1992                 return -EFAULT;
1993         }
1994
1995         /* Request template setup */
1996         qat_sym_session_init_common_hdr(header, qat_proto_flag);
1997         header->service_cmd_id = cdesc->qat_cmd;
1998
1999         /* Auth CD config setup */
2000         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2001         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2002         hash_cd_ctrl->inner_res_sz = digestsize;
2003         hash_cd_ctrl->final_sz = digestsize;
2004         hash_cd_ctrl->inner_state1_sz = state1_size;
2005         auth_param->auth_res_sz = digestsize;
2006
2007         hash_cd_ctrl->inner_state2_sz  = state2_size;
2008         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2009                         ((sizeof(struct icp_qat_hw_auth_setup) +
2010                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2011                                         >> 3);
2012
2013         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2014         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2015
2016         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2017         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2018
2019         return 0;
2020 }
2021
2022 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2023 {
2024         switch (key_len) {
2025         case ICP_QAT_HW_AES_128_KEY_SZ:
2026                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2027                 break;
2028         case ICP_QAT_HW_AES_192_KEY_SZ:
2029                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2030                 break;
2031         case ICP_QAT_HW_AES_256_KEY_SZ:
2032                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2033                 break;
2034         default:
2035                 return -EINVAL;
2036         }
2037         return 0;
2038 }
2039
2040 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2041                 enum icp_qat_hw_cipher_algo *alg)
2042 {
2043         switch (key_len) {
2044         case ICP_QAT_HW_AES_128_KEY_SZ:
2045                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2046                 break;
2047         case ICP_QAT_HW_AES_256_KEY_SZ:
2048                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2049                 break;
2050         default:
2051                 return -EINVAL;
2052         }
2053         return 0;
2054 }
2055
2056 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2057 {
2058         switch (key_len) {
2059         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2060                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2061                 break;
2062         default:
2063                 return -EINVAL;
2064         }
2065         return 0;
2066 }
2067
2068 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2069 {
2070         switch (key_len) {
2071         case ICP_QAT_HW_KASUMI_KEY_SZ:
2072                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2073                 break;
2074         default:
2075                 return -EINVAL;
2076         }
2077         return 0;
2078 }
2079
2080 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2081 {
2082         switch (key_len) {
2083         case ICP_QAT_HW_DES_KEY_SZ:
2084                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2085                 break;
2086         default:
2087                 return -EINVAL;
2088         }
2089         return 0;
2090 }
2091
2092 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2093 {
2094         switch (key_len) {
2095         case QAT_3DES_KEY_SZ_OPT1:
2096         case QAT_3DES_KEY_SZ_OPT2:
2097         case QAT_3DES_KEY_SZ_OPT3:
2098                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2099                 break;
2100         default:
2101                 return -EINVAL;
2102         }
2103         return 0;
2104 }
2105
2106 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2107 {
2108         switch (key_len) {
2109         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2110                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2111                 break;
2112         default:
2113                 return -EINVAL;
2114         }
2115         return 0;
2116 }
2117
2118 #ifdef RTE_LIB_SECURITY
2119 static int
2120 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2121 {
2122         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2123         struct rte_security_docsis_xform *docsis = &conf->docsis;
2124
2125         /* CRC generate -> Cipher encrypt */
2126         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2127
2128                 if (crypto_sym != NULL &&
2129                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2130                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2131                     crypto_sym->cipher.algo ==
2132                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2133                     (crypto_sym->cipher.key.length ==
2134                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2135                      crypto_sym->cipher.key.length ==
2136                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2137                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2138                     crypto_sym->next == NULL) {
2139                         return 0;
2140                 }
2141         /* Cipher decrypt -> CRC verify */
2142         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2143
2144                 if (crypto_sym != NULL &&
2145                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2146                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2147                     crypto_sym->cipher.algo ==
2148                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2149                     (crypto_sym->cipher.key.length ==
2150                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2151                      crypto_sym->cipher.key.length ==
2152                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2153                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2154                     crypto_sym->next == NULL) {
2155                         return 0;
2156                 }
2157         }
2158
2159         return -EINVAL;
2160 }
2161
2162 static int
2163 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2164                 struct rte_security_session_conf *conf, void *session_private)
2165 {
2166         int ret;
2167         int qat_cmd_id;
2168         struct rte_crypto_sym_xform *xform = NULL;
2169         struct qat_sym_session *session = session_private;
2170
2171         /* Clear the session */
2172         memset(session, 0, qat_sym_session_get_private_size(dev));
2173
2174         ret = qat_sec_session_check_docsis(conf);
2175         if (ret) {
2176                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2177                 return ret;
2178         }
2179
2180         xform = conf->crypto_xform;
2181
2182         /* Verify the session physical address is known */
2183         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2184         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2185                 QAT_LOG(ERR,
2186                         "Session physical address unknown. Bad memory pool.");
2187                 return -EINVAL;
2188         }
2189
2190         /* Set context descriptor physical address */
2191         session->cd_paddr = session_paddr +
2192                         offsetof(struct qat_sym_session, cd);
2193
2194         session->min_qat_dev_gen = QAT_GEN1;
2195
2196         /* Get requested QAT command id - should be cipher */
2197         qat_cmd_id = qat_get_cmd_id(xform);
2198         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2199                 QAT_LOG(ERR, "Unsupported xform chain requested");
2200                 return -ENOTSUP;
2201         }
2202         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2203
2204         ret = qat_sym_session_configure_cipher(dev, xform, session);
2205         if (ret < 0)
2206                 return ret;
2207
2208         return 0;
2209 }
2210
2211 int
2212 qat_security_session_create(void *dev,
2213                                 struct rte_security_session_conf *conf,
2214                                 struct rte_security_session *sess,
2215                                 struct rte_mempool *mempool)
2216 {
2217         void *sess_private_data;
2218         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2219         int ret;
2220
2221         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2222                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2223                 QAT_LOG(ERR, "Invalid security protocol");
2224                 return -EINVAL;
2225         }
2226
2227         if (rte_mempool_get(mempool, &sess_private_data)) {
2228                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2229                 return -ENOMEM;
2230         }
2231
2232         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2233                         sess_private_data);
2234         if (ret != 0) {
2235                 QAT_LOG(ERR, "Failed to configure session parameters");
2236                 /* Return session to mempool */
2237                 rte_mempool_put(mempool, sess_private_data);
2238                 return ret;
2239         }
2240
2241         set_sec_session_private_data(sess, sess_private_data);
2242
2243         return ret;
2244 }
2245
2246 int
2247 qat_security_session_destroy(void *dev __rte_unused,
2248                                  struct rte_security_session *sess)
2249 {
2250         void *sess_priv = get_sec_session_private_data(sess);
2251         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2252
2253         if (sess_priv) {
2254                 if (s->bpi_ctx)
2255                         bpi_cipher_ctx_free(s->bpi_ctx);
2256                 memset(s, 0, qat_sym_session_get_private_size(dev));
2257                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2258
2259                 set_sec_session_private_data(sess, NULL);
2260                 rte_mempool_put(sess_mp, sess_priv);
2261         }
2262         return 0;
2263 }
2264 #endif