crypto/qat: rename content descriptor functions
[dpdk.git] / drivers / crypto / qat / qat_sym_session.c
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2015-2019 Intel Corporation
3  */
4
5 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
6 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
7 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
8 #include <openssl/evp.h>        /* Needed for bpi runt block processing */
9
10 #include <rte_memcpy.h>
11 #include <rte_common.h>
12 #include <rte_spinlock.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_crypto_sym.h>
17 #ifdef RTE_LIB_SECURITY
18 #include <rte_security.h>
19 #endif
20
21 #include "qat_logs.h"
22 #include "qat_sym_session.h"
23 #include "qat_sym_pmd.h"
24
/* Initial hash-state constants (the H0..Hn words from FIPS 180-2,
 * stored big-endian) used to seed partial-hash precomputation for
 * HMAC state setup.
 */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};
/* Forward declaration: build the cipher part of the session's
 * content descriptor from the raw encryption key.
 */
static int
qat_sym_cd_cipher_set(struct qat_sym_session *cd,
						const uint8_t *enckey,
						uint32_t enckeylen);

/* Forward declaration: build the auth part of the session's
 * content descriptor (key, AAD length, digest size, operation).
 */
static int
qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
						const uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t aad_length,
						uint32_t digestsize,
						unsigned int operation);
72
73 /** Frees a context previously created
74  *  Depends on openssl libcrypto
75  */
76 static void
77 bpi_cipher_ctx_free(void *bpi_ctx)
78 {
79         if (bpi_ctx != NULL)
80                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
81 }
82
83 /** Creates a context in either AES or DES in ECB mode
84  *  Depends on openssl libcrypto
85  */
86 static int
87 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
88                 enum rte_crypto_cipher_operation direction __rte_unused,
89                 const uint8_t *key, uint16_t key_length, void **ctx)
90 {
91         const EVP_CIPHER *algo = NULL;
92         int ret;
93         *ctx = EVP_CIPHER_CTX_new();
94
95         if (*ctx == NULL) {
96                 ret = -ENOMEM;
97                 goto ctx_init_err;
98         }
99
100         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
101                 algo = EVP_des_ecb();
102         else
103                 if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
104                         algo = EVP_aes_128_ecb();
105                 else
106                         algo = EVP_aes_256_ecb();
107
108         /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
109         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
110                 ret = -EINVAL;
111                 goto ctx_init_err;
112         }
113
114         return 0;
115
116 ctx_init_err:
117         if (*ctx != NULL)
118                 EVP_CIPHER_CTX_free(*ctx);
119         return ret;
120 }
121
122 static int
123 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
124                 struct qat_sym_dev_private *internals)
125 {
126         int i = 0;
127         const struct rte_cryptodev_capabilities *capability;
128
129         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
130                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
131                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
132                         continue;
133
134                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
135                         continue;
136
137                 if (capability->sym.cipher.algo == algo)
138                         return 1;
139         }
140         return 0;
141 }
142
143 static int
144 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
145                 struct qat_sym_dev_private *internals)
146 {
147         int i = 0;
148         const struct rte_cryptodev_capabilities *capability;
149
150         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
151                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
152                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
153                         continue;
154
155                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
156                         continue;
157
158                 if (capability->sym.auth.algo == algo)
159                         return 1;
160         }
161         return 0;
162 }
163
164 void
165 qat_sym_session_clear(struct rte_cryptodev *dev,
166                 struct rte_cryptodev_sym_session *sess)
167 {
168         uint8_t index = dev->driver_id;
169         void *sess_priv = get_sym_session_private_data(sess, index);
170         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
171
172         if (sess_priv) {
173                 if (s->bpi_ctx)
174                         bpi_cipher_ctx_free(s->bpi_ctx);
175                 memset(s, 0, qat_sym_session_get_private_size(dev));
176                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
177
178                 set_sym_session_private_data(sess, index, NULL);
179                 rte_mempool_put(sess_mp, sess_priv);
180         }
181 }
182
183 static int
184 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
185 {
186         /* Cipher Only */
187         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
188                 return ICP_QAT_FW_LA_CMD_CIPHER;
189
190         /* Authentication Only */
191         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
192                 return ICP_QAT_FW_LA_CMD_AUTH;
193
194         /* AEAD */
195         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
196                 /* AES-GCM and AES-CCM works with different direction
197                  * GCM first encrypts and generate hash where AES-CCM
198                  * first generate hash and encrypts. Similar relation
199                  * applies to decryption.
200                  */
201                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
202                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
203                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
204                         else
205                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
206                 else
207                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
208                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
209                         else
210                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
211         }
212
213         if (xform->next == NULL)
214                 return -1;
215
216         /* Cipher then Authenticate */
217         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
218                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
219                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
220
221         /* Authenticate then Cipher */
222         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
223                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
224                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
225
226         return -1;
227 }
228
229 static struct rte_crypto_auth_xform *
230 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
231 {
232         do {
233                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
234                         return &xform->auth;
235
236                 xform = xform->next;
237         } while (xform);
238
239         return NULL;
240 }
241
242 static struct rte_crypto_cipher_xform *
243 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
244 {
245         do {
246                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
247                         return &xform->cipher;
248
249                 xform = xform->next;
250         } while (xform);
251
252         return NULL;
253 }
254
255 int
256 qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
257                 struct rte_crypto_sym_xform *xform,
258                 struct qat_sym_session *session)
259 {
260         struct qat_sym_dev_private *internals = dev->data->dev_private;
261         struct rte_crypto_cipher_xform *cipher_xform = NULL;
262         enum qat_device_gen qat_dev_gen =
263                                 internals->qat_dev->qat_dev_gen;
264         int ret;
265
266         /* Get cipher xform from crypto xform chain */
267         cipher_xform = qat_get_cipher_xform(xform);
268
269         session->cipher_iv.offset = cipher_xform->iv.offset;
270         session->cipher_iv.length = cipher_xform->iv.length;
271
272         switch (cipher_xform->algo) {
273         case RTE_CRYPTO_CIPHER_AES_CBC:
274                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
275                                 &session->qat_cipher_alg) != 0) {
276                         QAT_LOG(ERR, "Invalid AES cipher key size");
277                         ret = -EINVAL;
278                         goto error_out;
279                 }
280                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
281                 break;
282         case RTE_CRYPTO_CIPHER_AES_CTR:
283                 if (qat_sym_validate_aes_key(cipher_xform->key.length,
284                                 &session->qat_cipher_alg) != 0) {
285                         QAT_LOG(ERR, "Invalid AES cipher key size");
286                         ret = -EINVAL;
287                         goto error_out;
288                 }
289                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
290                 if (qat_dev_gen == QAT_GEN4) {
291                         /* TODO: Filter WCP */
292                         ICP_QAT_FW_LA_SLICE_TYPE_SET(
293                                 session->fw_req.comn_hdr.serv_specif_flags,
294                                 ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
295                         session->is_ucs = 1;
296                 }
297                 break;
298         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
299                 if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
300                                         &session->qat_cipher_alg) != 0) {
301                         QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
302                         ret = -EINVAL;
303                         goto error_out;
304                 }
305                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
306                 break;
307         case RTE_CRYPTO_CIPHER_NULL:
308                 session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
309                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
310                 break;
311         case RTE_CRYPTO_CIPHER_KASUMI_F8:
312                 if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
313                                         &session->qat_cipher_alg) != 0) {
314                         QAT_LOG(ERR, "Invalid KASUMI cipher key size");
315                         ret = -EINVAL;
316                         goto error_out;
317                 }
318                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
319                 break;
320         case RTE_CRYPTO_CIPHER_3DES_CBC:
321                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
322                                 &session->qat_cipher_alg) != 0) {
323                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
324                         ret = -EINVAL;
325                         goto error_out;
326                 }
327                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
328                 break;
329         case RTE_CRYPTO_CIPHER_DES_CBC:
330                 if (qat_sym_validate_des_key(cipher_xform->key.length,
331                                 &session->qat_cipher_alg) != 0) {
332                         QAT_LOG(ERR, "Invalid DES cipher key size");
333                         ret = -EINVAL;
334                         goto error_out;
335                 }
336                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
337                 break;
338         case RTE_CRYPTO_CIPHER_3DES_CTR:
339                 if (qat_sym_validate_3des_key(cipher_xform->key.length,
340                                 &session->qat_cipher_alg) != 0) {
341                         QAT_LOG(ERR, "Invalid 3DES cipher key size");
342                         ret = -EINVAL;
343                         goto error_out;
344                 }
345                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
346                 break;
347         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
348                 ret = bpi_cipher_ctx_init(
349                                         cipher_xform->algo,
350                                         cipher_xform->op,
351                                         cipher_xform->key.data,
352                                         cipher_xform->key.length,
353                                         &session->bpi_ctx);
354                 if (ret != 0) {
355                         QAT_LOG(ERR, "failed to create DES BPI ctx");
356                         goto error_out;
357                 }
358                 if (qat_sym_validate_des_key(cipher_xform->key.length,
359                                 &session->qat_cipher_alg) != 0) {
360                         QAT_LOG(ERR, "Invalid DES cipher key size");
361                         ret = -EINVAL;
362                         goto error_out;
363                 }
364                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
365                 break;
366         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
367                 ret = bpi_cipher_ctx_init(
368                                         cipher_xform->algo,
369                                         cipher_xform->op,
370                                         cipher_xform->key.data,
371                                         cipher_xform->key.length,
372                                         &session->bpi_ctx);
373                 if (ret != 0) {
374                         QAT_LOG(ERR, "failed to create AES BPI ctx");
375                         goto error_out;
376                 }
377                 if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
378                                 &session->qat_cipher_alg) != 0) {
379                         QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
380                         ret = -EINVAL;
381                         goto error_out;
382                 }
383                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
384                 break;
385         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
386                 if (!qat_is_cipher_alg_supported(
387                         cipher_xform->algo, internals)) {
388                         QAT_LOG(ERR, "%s not supported on this device",
389                                 rte_crypto_cipher_algorithm_strings
390                                         [cipher_xform->algo]);
391                         ret = -ENOTSUP;
392                         goto error_out;
393                 }
394                 if (qat_sym_validate_zuc_key(cipher_xform->key.length,
395                                 &session->qat_cipher_alg) != 0) {
396                         QAT_LOG(ERR, "Invalid ZUC cipher key size");
397                         ret = -EINVAL;
398                         goto error_out;
399                 }
400                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
401                 break;
402         case RTE_CRYPTO_CIPHER_AES_XTS:
403                 if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
404                         QAT_LOG(ERR, "AES-XTS-192 not supported");
405                         ret = -EINVAL;
406                         goto error_out;
407                 }
408                 if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
409                                 &session->qat_cipher_alg) != 0) {
410                         QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
411                         ret = -EINVAL;
412                         goto error_out;
413                 }
414                 session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
415                 break;
416         case RTE_CRYPTO_CIPHER_3DES_ECB:
417         case RTE_CRYPTO_CIPHER_AES_ECB:
418         case RTE_CRYPTO_CIPHER_AES_F8:
419         case RTE_CRYPTO_CIPHER_ARC4:
420                 QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
421                                 cipher_xform->algo);
422                 ret = -ENOTSUP;
423                 goto error_out;
424         default:
425                 QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
426                                 cipher_xform->algo);
427                 ret = -EINVAL;
428                 goto error_out;
429         }
430
431         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
432                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
433         else
434                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
435
436         if (qat_sym_cd_cipher_set(session,
437                                                 cipher_xform->key.data,
438                                                 cipher_xform->key.length)) {
439                 ret = -EINVAL;
440                 goto error_out;
441         }
442
443         return 0;
444
445 error_out:
446         if (session->bpi_ctx) {
447                 bpi_cipher_ctx_free(session->bpi_ctx);
448                 session->bpi_ctx = NULL;
449         }
450         return ret;
451 }
452
453 int
454 qat_sym_session_configure(struct rte_cryptodev *dev,
455                 struct rte_crypto_sym_xform *xform,
456                 struct rte_cryptodev_sym_session *sess,
457                 struct rte_mempool *mempool)
458 {
459         void *sess_private_data;
460         int ret;
461
462         if (rte_mempool_get(mempool, &sess_private_data)) {
463                 CDEV_LOG_ERR(
464                         "Couldn't get object from session mempool");
465                 return -ENOMEM;
466         }
467
468         ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
469         if (ret != 0) {
470                 QAT_LOG(ERR,
471                     "Crypto QAT PMD: failed to configure session parameters");
472
473                 /* Return session to mempool */
474                 rte_mempool_put(mempool, sess_private_data);
475                 return ret;
476         }
477
478         set_sym_session_private_data(sess, dev->driver_id,
479                 sess_private_data);
480
481         return 0;
482 }
483
/* Enable the extended-protocol-flags mode in the firmware request header
 * and program the per-algorithm hash/proto flags. Used when hash and
 * cipher algorithms are "mixed" (wireless hash with non-wireless cipher
 * or vice versa). LW = 32-bit longword index within the firmware request.
 */
static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 (OR-ed: caller may pass 0 to set none) */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 according to the configured cipher:
	 * SNOW 3G uses the legacy proto field, ZUC uses a dedicated flag,
	 * anything else clears both.
	 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}
525
526 static void
527 qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
528                 struct qat_sym_session *session)
529 {
530         const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
531         enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
532                         QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
533
534         if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
535                         session->qat_cipher_alg !=
536                         ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
537                 session->min_qat_dev_gen = min_dev_gen;
538                 qat_sym_session_set_ext_hash_flags(session,
539                         1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
540         } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
541                         session->qat_cipher_alg !=
542                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
543                 session->min_qat_dev_gen = min_dev_gen;
544                 qat_sym_session_set_ext_hash_flags(session,
545                         1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
546         } else if ((session->aes_cmac ||
547                         session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
548                         (session->qat_cipher_alg ==
549                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
550                         session->qat_cipher_alg ==
551                         ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
552                 session->min_qat_dev_gen = min_dev_gen;
553                 qat_sym_session_set_ext_hash_flags(session, 0);
554         }
555 }
556
557 int
558 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
559                 struct rte_crypto_sym_xform *xform, void *session_private)
560 {
561         struct qat_sym_session *session = session_private;
562         struct qat_sym_dev_private *internals = dev->data->dev_private;
563         enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
564         int ret;
565         int qat_cmd_id;
566
567         /* Verify the session physical address is known */
568         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
569         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
570                 QAT_LOG(ERR,
571                         "Session physical address unknown. Bad memory pool.");
572                 return -EINVAL;
573         }
574
575         memset(session, 0, sizeof(*session));
576         /* Set context descriptor physical address */
577         session->cd_paddr = session_paddr +
578                         offsetof(struct qat_sym_session, cd);
579
580         session->min_qat_dev_gen = QAT_GEN1;
581         session->is_ucs = 0;
582
583         /* Get requested QAT command id */
584         qat_cmd_id = qat_get_cmd_id(xform);
585         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
586                 QAT_LOG(ERR, "Unsupported xform chain requested");
587                 return -ENOTSUP;
588         }
589         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
590         switch (session->qat_cmd) {
591         case ICP_QAT_FW_LA_CMD_CIPHER:
592                 ret = qat_sym_session_configure_cipher(dev, xform, session);
593                 if (ret < 0)
594                         return ret;
595                 break;
596         case ICP_QAT_FW_LA_CMD_AUTH:
597                 ret = qat_sym_session_configure_auth(dev, xform, session);
598                 if (ret < 0)
599                         return ret;
600                 session->is_single_pass_gmac =
601                                qat_dev_gen == QAT_GEN3 &&
602                                xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC &&
603                                xform->auth.iv.length == QAT_AES_GCM_SPC_IV_SIZE;
604                 break;
605         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
606                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
607                         ret = qat_sym_session_configure_aead(dev, xform,
608                                         session);
609                         if (ret < 0)
610                                 return ret;
611                 } else {
612                         ret = qat_sym_session_configure_cipher(dev,
613                                         xform, session);
614                         if (ret < 0)
615                                 return ret;
616                         ret = qat_sym_session_configure_auth(dev,
617                                         xform, session);
618                         if (ret < 0)
619                                 return ret;
620                         /* Special handling of mixed hash+cipher algorithms */
621                         qat_sym_session_handle_mixed(dev, session);
622                 }
623                 break;
624         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
625                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
626                         ret = qat_sym_session_configure_aead(dev, xform,
627                                         session);
628                         if (ret < 0)
629                                 return ret;
630                 } else {
631                         ret = qat_sym_session_configure_auth(dev,
632                                         xform, session);
633                         if (ret < 0)
634                                 return ret;
635                         ret = qat_sym_session_configure_cipher(dev,
636                                         xform, session);
637                         if (ret < 0)
638                                 return ret;
639                         /* Special handling of mixed hash+cipher algorithms */
640                         qat_sym_session_handle_mixed(dev, session);
641                 }
642                 break;
643         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
644         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
645         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
646         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
647         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
648         case ICP_QAT_FW_LA_CMD_MGF1:
649         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
650         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
651         case ICP_QAT_FW_LA_CMD_DELIMITER:
652         QAT_LOG(ERR, "Unsupported Service %u",
653                 session->qat_cmd);
654                 return -ENOTSUP;
655         default:
656         QAT_LOG(ERR, "Unsupported Service %u",
657                 session->qat_cmd);
658                 return -ENOTSUP;
659         }
660
661         return 0;
662 }
663
/* Configure an AEAD session for single-pass (SPC) operation: the request
 * is issued as a plain CIPHER command and the hash is computed by the
 * cipher slice in the same pass. Requires GEN3+ hardware.
 *
 * Returns 0 on success, -EINVAL if the cipher content descriptor cannot
 * be built.
 */
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
		struct rte_crypto_aead_xform *aead_xform)
{
	/* serv_specif_rqpars aliases the cipher request parameters */
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *) &session->fw_req.serv_specif_rqpars;

	session->is_single_pass = 1;
	session->min_qat_dev_gen = QAT_GEN3;
	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
		/* Flag the standard 12-octet GCM IV length to firmware */
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
	} else {
		/* Chacha-Poly is special case that use QAT CTR mode */
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
	}
	session->cipher_iv.offset = aead_xform->iv.offset;
	session->cipher_iv.length = aead_xform->iv.length;
	if (qat_sym_cd_cipher_set(session,
			aead_xform->key.data, aead_xform->key.length))
		return -EINVAL;
	session->aad_len = aead_xform->aad_length;
	session->digest_length = aead_xform->digest_length;
	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		/* Encrypt: generate and return the auth tag */
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
		ICP_QAT_FW_LA_RET_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_RET_AUTH_RES);
	} else {
		/* Decrypt: compare the auth tag in hardware */
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
		ICP_QAT_FW_LA_CMP_AUTH_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_CMP_AUTH_RES);
	}
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			session->fw_req.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
	session->fw_req.comn_hdr.service_cmd_id =
			ICP_QAT_FW_LA_CMD_CIPHER;
	/* Cipher config word: AEAD mode, no key conversion */
	session->cd.cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				session->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				session->qat_dir);
	/* Digest length is packed into the cipher config word for SPC */
	QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
			aead_xform->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	session->cd.cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
				aead_xform->aad_length);
	cipher_param->spc_aad_sz = aead_xform->aad_length;
	cipher_param->spc_auth_res_sz = aead_xform->digest_length;

	return 0;
}
729
/*
 * Configure a QAT session from an authentication-only crypto xform.
 *
 * Translates the rte_crypto auth algorithm into the matching QAT
 * hardware hash algorithm, records key/IV/digest parameters in the
 * session and builds the firmware content descriptor(s).
 * AES-GMAC is special-cased: it is programmed as a combined
 * cipher+hash command (the auth key is also loaded as the AES-CTR
 * cipher key) and the command id is then restored to auth-only.
 *
 * @param dev      cryptodev owning the session (used for capability check)
 * @param xform    xform chain; the auth xform is extracted from it
 * @param session  session to populate
 * @return 0 on success, -EINVAL on invalid key/algorithm,
 *         -ENOTSUP when the algorithm is unsupported on this device
 */
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;

	session->aes_cmac = 0;
	session->auth_key_length = auth_xform->key.length;
	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	/* Default to MODE1; plain (keyless) hashes override to MODE0 below */
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		/* CMAC reuses the XCBC hardware algo; aes_cmac flag selects
		 * the CMAC key-precompute path later on.
		 */
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		/* IV length 0 means the default GCM J0 length applies */
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		/* ZUC is only present on some device generations */
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */

			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_cd_auth_set(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */

			if (qat_sym_cd_auth_set(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_sym_cd_cipher_set(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_sym_cd_auth_set(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}
889
890 int
891 qat_sym_session_configure_aead(struct rte_cryptodev *dev,
892                                 struct rte_crypto_sym_xform *xform,
893                                 struct qat_sym_session *session)
894 {
895         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
896         enum rte_crypto_auth_operation crypto_operation;
897         struct qat_sym_dev_private *internals =
898                         dev->data->dev_private;
899         enum qat_device_gen qat_dev_gen =
900                         internals->qat_dev->qat_dev_gen;
901
902         /*
903          * Store AEAD IV parameters as cipher IV,
904          * to avoid unnecessary memory usage
905          */
906         session->cipher_iv.offset = xform->aead.iv.offset;
907         session->cipher_iv.length = xform->aead.iv.length;
908
909         session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
910
911         session->is_single_pass = 0;
912         switch (aead_xform->algo) {
913         case RTE_CRYPTO_AEAD_AES_GCM:
914                 if (qat_sym_validate_aes_key(aead_xform->key.length,
915                                 &session->qat_cipher_alg) != 0) {
916                         QAT_LOG(ERR, "Invalid AES key size");
917                         return -EINVAL;
918                 }
919                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
920                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
921                 if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
922                                 QAT_AES_GCM_SPC_IV_SIZE) {
923                         return qat_sym_session_handle_single_pass(session,
924                                         aead_xform);
925                 }
926                 if (session->cipher_iv.length == 0)
927                         session->cipher_iv.length = AES_GCM_J0_LEN;
928
929                 break;
930         case RTE_CRYPTO_AEAD_AES_CCM:
931                 if (qat_sym_validate_aes_key(aead_xform->key.length,
932                                 &session->qat_cipher_alg) != 0) {
933                         QAT_LOG(ERR, "Invalid AES key size");
934                         return -EINVAL;
935                 }
936                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
937                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
938                 break;
939         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
940                 if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
941                         return -EINVAL;
942                 session->qat_cipher_alg =
943                                 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
944                 return qat_sym_session_handle_single_pass(session,
945                                                 aead_xform);
946         default:
947                 QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
948                                 aead_xform->algo);
949                 return -EINVAL;
950         }
951
952         if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
953                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
954                         (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
955                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
956                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
957                 /*
958                  * It needs to create cipher desc content first,
959                  * then authentication
960                  */
961                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
962                         RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
963
964                 if (qat_sym_cd_cipher_set(session,
965                                         aead_xform->key.data,
966                                         aead_xform->key.length))
967                         return -EINVAL;
968
969                 if (qat_sym_cd_auth_set(session,
970                                         aead_xform->key.data,
971                                         aead_xform->key.length,
972                                         aead_xform->aad_length,
973                                         aead_xform->digest_length,
974                                         crypto_operation))
975                         return -EINVAL;
976         } else {
977                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
978                 /*
979                  * It needs to create authentication desc content first,
980                  * then cipher
981                  */
982
983                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
984                         RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
985
986                 if (qat_sym_cd_auth_set(session,
987                                         aead_xform->key.data,
988                                         aead_xform->key.length,
989                                         aead_xform->aad_length,
990                                         aead_xform->digest_length,
991                                         crypto_operation))
992                         return -EINVAL;
993
994                 if (qat_sym_cd_cipher_set(session,
995                                         aead_xform->key.data,
996                                         aead_xform->key.length))
997                         return -EINVAL;
998         }
999
1000         session->digest_length = aead_xform->digest_length;
1001         return 0;
1002 }
1003
1004 unsigned int qat_sym_session_get_private_size(
1005                 struct rte_cryptodev *dev __rte_unused)
1006 {
1007         return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
1008 }
1009
1010 /* returns block size in bytes per cipher algo */
1011 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
1012 {
1013         switch (qat_cipher_alg) {
1014         case ICP_QAT_HW_CIPHER_ALGO_DES:
1015                 return ICP_QAT_HW_DES_BLK_SZ;
1016         case ICP_QAT_HW_CIPHER_ALGO_3DES:
1017                 return ICP_QAT_HW_3DES_BLK_SZ;
1018         case ICP_QAT_HW_CIPHER_ALGO_AES128:
1019         case ICP_QAT_HW_CIPHER_ALGO_AES192:
1020         case ICP_QAT_HW_CIPHER_ALGO_AES256:
1021                 return ICP_QAT_HW_AES_BLK_SZ;
1022         default:
1023                 QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
1024                 return -EFAULT;
1025         };
1026         return -EFAULT;
1027 }
1028
1029 /*
1030  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
1031  * This is digest size rounded up to nearest quadword
1032  */
1033 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1034 {
1035         switch (qat_hash_alg) {
1036         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1037                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
1038                                                 QAT_HW_DEFAULT_ALIGNMENT);
1039         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1040                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
1041                                                 QAT_HW_DEFAULT_ALIGNMENT);
1042         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1043                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
1044                                                 QAT_HW_DEFAULT_ALIGNMENT);
1045         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1046                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
1047                                                 QAT_HW_DEFAULT_ALIGNMENT);
1048         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1049                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1050                                                 QAT_HW_DEFAULT_ALIGNMENT);
1051         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1052                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
1053                                                 QAT_HW_DEFAULT_ALIGNMENT);
1054         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1055         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1056                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
1057                                                 QAT_HW_DEFAULT_ALIGNMENT);
1058         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1059                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
1060                                                 QAT_HW_DEFAULT_ALIGNMENT);
1061         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1062                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
1063                                                 QAT_HW_DEFAULT_ALIGNMENT);
1064         case ICP_QAT_HW_AUTH_ALGO_MD5:
1065                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
1066                                                 QAT_HW_DEFAULT_ALIGNMENT);
1067         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1068                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
1069                                                 QAT_HW_DEFAULT_ALIGNMENT);
1070         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1071                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
1072                                                 QAT_HW_DEFAULT_ALIGNMENT);
1073         case ICP_QAT_HW_AUTH_ALGO_NULL:
1074                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
1075                                                 QAT_HW_DEFAULT_ALIGNMENT);
1076         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1077                 /* return maximum state1 size in this case */
1078                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
1079                                                 QAT_HW_DEFAULT_ALIGNMENT);
1080         default:
1081                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1082                 return -EFAULT;
1083         };
1084         return -EFAULT;
1085 }
1086
1087 /* returns digest size in bytes  per hash algo */
1088 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1089 {
1090         switch (qat_hash_alg) {
1091         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1092                 return ICP_QAT_HW_SHA1_STATE1_SZ;
1093         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1094                 return ICP_QAT_HW_SHA224_STATE1_SZ;
1095         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1096                 return ICP_QAT_HW_SHA256_STATE1_SZ;
1097         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1098                 return ICP_QAT_HW_SHA384_STATE1_SZ;
1099         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1100                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1101         case ICP_QAT_HW_AUTH_ALGO_MD5:
1102                 return ICP_QAT_HW_MD5_STATE1_SZ;
1103         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1104                 return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1105         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1106                 /* return maximum digest size in this case */
1107                 return ICP_QAT_HW_SHA512_STATE1_SZ;
1108         default:
1109                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1110                 return -EFAULT;
1111         };
1112         return -EFAULT;
1113 }
1114
1115 /* returns block size in byes per hash algo */
1116 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
1117 {
1118         switch (qat_hash_alg) {
1119         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1120                 return SHA_CBLOCK;
1121         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1122                 return SHA256_CBLOCK;
1123         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1124                 return SHA256_CBLOCK;
1125         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1126                 return SHA512_CBLOCK;
1127         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1128                 return SHA512_CBLOCK;
1129         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1130                 return 16;
1131         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1132                 return ICP_QAT_HW_AES_BLK_SZ;
1133         case ICP_QAT_HW_AUTH_ALGO_MD5:
1134                 return MD5_CBLOCK;
1135         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
1136                 /* return maximum block size in this case */
1137                 return SHA512_CBLOCK;
1138         default:
1139                 QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
1140                 return -EFAULT;
1141         };
1142         return -EFAULT;
1143 }
1144
1145 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
1146 {
1147         SHA_CTX ctx;
1148
1149         if (!SHA1_Init(&ctx))
1150                 return -EFAULT;
1151         SHA1_Transform(&ctx, data_in);
1152         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
1153         return 0;
1154 }
1155
1156 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
1157 {
1158         SHA256_CTX ctx;
1159
1160         if (!SHA224_Init(&ctx))
1161                 return -EFAULT;
1162         SHA256_Transform(&ctx, data_in);
1163         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1164         return 0;
1165 }
1166
1167 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
1168 {
1169         SHA256_CTX ctx;
1170
1171         if (!SHA256_Init(&ctx))
1172                 return -EFAULT;
1173         SHA256_Transform(&ctx, data_in);
1174         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
1175         return 0;
1176 }
1177
1178 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
1179 {
1180         SHA512_CTX ctx;
1181
1182         if (!SHA384_Init(&ctx))
1183                 return -EFAULT;
1184         SHA512_Transform(&ctx, data_in);
1185         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1186         return 0;
1187 }
1188
1189 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
1190 {
1191         SHA512_CTX ctx;
1192
1193         if (!SHA512_Init(&ctx))
1194                 return -EFAULT;
1195         SHA512_Transform(&ctx, data_in);
1196         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
1197         return 0;
1198 }
1199
1200 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
1201 {
1202         MD5_CTX ctx;
1203
1204         if (!MD5_Init(&ctx))
1205                 return -EFAULT;
1206         MD5_Transform(&ctx, data_in);
1207         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
1208
1209         return 0;
1210 }
1211
1212 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
1213                         uint8_t *data_in,
1214                         uint8_t *data_out)
1215 {
1216         int digest_size;
1217         uint8_t digest[qat_hash_get_digest_size(
1218                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1219         uint32_t *hash_state_out_be32;
1220         uint64_t *hash_state_out_be64;
1221         int i;
1222
1223         /* Initialize to avoid gcc warning */
1224         memset(digest, 0, sizeof(digest));
1225
1226         digest_size = qat_hash_get_digest_size(hash_alg);
1227         if (digest_size <= 0)
1228                 return -EFAULT;
1229
1230         hash_state_out_be32 = (uint32_t *)data_out;
1231         hash_state_out_be64 = (uint64_t *)data_out;
1232
1233         switch (hash_alg) {
1234         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1235                 if (partial_hash_sha1(data_in, digest))
1236                         return -EFAULT;
1237                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1238                         *hash_state_out_be32 =
1239                                 rte_bswap32(*(((uint32_t *)digest)+i));
1240                 break;
1241         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1242                 if (partial_hash_sha224(data_in, digest))
1243                         return -EFAULT;
1244                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1245                         *hash_state_out_be32 =
1246                                 rte_bswap32(*(((uint32_t *)digest)+i));
1247                 break;
1248         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1249                 if (partial_hash_sha256(data_in, digest))
1250                         return -EFAULT;
1251                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
1252                         *hash_state_out_be32 =
1253                                 rte_bswap32(*(((uint32_t *)digest)+i));
1254                 break;
1255         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1256                 if (partial_hash_sha384(data_in, digest))
1257                         return -EFAULT;
1258                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1259                         *hash_state_out_be64 =
1260                                 rte_bswap64(*(((uint64_t *)digest)+i));
1261                 break;
1262         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1263                 if (partial_hash_sha512(data_in, digest))
1264                         return -EFAULT;
1265                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
1266                         *hash_state_out_be64 =
1267                                 rte_bswap64(*(((uint64_t *)digest)+i));
1268                 break;
1269         case ICP_QAT_HW_AUTH_ALGO_MD5:
1270                 if (partial_hash_md5(data_in, data_out))
1271                         return -EFAULT;
1272                 break;
1273         default:
1274                 QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
1275                 return -EFAULT;
1276         }
1277
1278         return 0;
1279 }
/* HMAC inner/outer pad bytes (RFC 2104) */
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
/* Number of derived keys precomputed for AES-XCBC (K1, K2, K3) */
#define HASH_XCBC_PRECOMP_KEY_NUM 3

/* All-zero block; encrypted with the auth key to derive CMAC subkeys */
static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
1285
1286 static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
1287 {
1288         int i;
1289
1290         derived[0] = base[0] << 1;
1291         for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
1292                 derived[i] = base[i] << 1;
1293                 derived[i - 1] |= base[i] >> 7;
1294         }
1295
1296         if (base[0] & 0x80)
1297                 derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
1298 }
1299
1300 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
1301                                 const uint8_t *auth_key,
1302                                 uint16_t auth_keylen,
1303                                 uint8_t *p_state_buf,
1304                                 uint16_t *p_state_len,
1305                                 uint8_t aes_cmac)
1306 {
1307         int block_size;
1308         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1309         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
1310         int i;
1311
1312         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
1313
1314                 /* CMAC */
1315                 if (aes_cmac) {
1316                         AES_KEY enc_key;
1317                         uint8_t *in = NULL;
1318                         uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
1319                         uint8_t *k1, *k2;
1320
1321                         auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
1322
1323                         in = rte_zmalloc("AES CMAC K1",
1324                                          ICP_QAT_HW_AES_128_KEY_SZ, 16);
1325
1326                         if (in == NULL) {
1327                                 QAT_LOG(ERR, "Failed to alloc memory");
1328                                 return -ENOMEM;
1329                         }
1330
1331                         rte_memcpy(in, AES_CMAC_SEED,
1332                                    ICP_QAT_HW_AES_128_KEY_SZ);
1333                         rte_memcpy(p_state_buf, auth_key, auth_keylen);
1334
1335                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1336                                 &enc_key) != 0) {
1337                                 rte_free(in);
1338                                 return -EFAULT;
1339                         }
1340
1341                         AES_encrypt(in, k0, &enc_key);
1342
1343                         k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1344                         k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1345
1346                         aes_cmac_key_derive(k0, k1);
1347                         aes_cmac_key_derive(k1, k2);
1348
1349                         memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
1350                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1351                         rte_free(in);
1352                         return 0;
1353                 } else {
1354                         static uint8_t qat_aes_xcbc_key_seed[
1355                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
1356                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1357                                 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
1358                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1359                                 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
1360                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1361                                 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
1362                         };
1363
1364                         uint8_t *in = NULL;
1365                         uint8_t *out = p_state_buf;
1366                         int x;
1367                         AES_KEY enc_key;
1368
1369                         in = rte_zmalloc("working mem for key",
1370                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
1371                         if (in == NULL) {
1372                                 QAT_LOG(ERR, "Failed to alloc memory");
1373                                 return -ENOMEM;
1374                         }
1375
1376                         rte_memcpy(in, qat_aes_xcbc_key_seed,
1377                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1378                         for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
1379                                 if (AES_set_encrypt_key(auth_key,
1380                                                         auth_keylen << 3,
1381                                                         &enc_key) != 0) {
1382                                         rte_free(in -
1383                                           (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
1384                                         memset(out -
1385                                            (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
1386                                           0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
1387                                         return -EFAULT;
1388                                 }
1389                                 AES_encrypt(in, out, &enc_key);
1390                                 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1391                                 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
1392                         }
1393                         *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
1394                         rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
1395                         return 0;
1396                 }
1397
1398         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
1399                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
1400                 uint8_t *in = NULL;
1401                 uint8_t *out = p_state_buf;
1402                 AES_KEY enc_key;
1403
1404                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
1405                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1406                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
1407                 in = rte_zmalloc("working mem for key",
1408                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
1409                 if (in == NULL) {
1410                         QAT_LOG(ERR, "Failed to alloc memory");
1411                         return -ENOMEM;
1412                 }
1413
1414                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
1415                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
1416                         &enc_key) != 0) {
1417                         return -EFAULT;
1418                 }
1419                 AES_encrypt(in, out, &enc_key);
1420                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
1421                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
1422                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
1423                 rte_free(in);
1424                 return 0;
1425         }
1426
1427         block_size = qat_hash_get_block_size(hash_alg);
1428         if (block_size < 0)
1429                 return block_size;
1430         /* init ipad and opad from key and xor with fixed values */
1431         memset(ipad, 0, block_size);
1432         memset(opad, 0, block_size);
1433
1434         if (auth_keylen > (unsigned int)block_size) {
1435                 QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
1436                 return -EFAULT;
1437         }
1438         rte_memcpy(ipad, auth_key, auth_keylen);
1439         rte_memcpy(opad, auth_key, auth_keylen);
1440
1441         for (i = 0; i < block_size; i++) {
1442                 uint8_t *ipad_ptr = ipad + i;
1443                 uint8_t *opad_ptr = opad + i;
1444                 *ipad_ptr ^= HMAC_IPAD_VALUE;
1445                 *opad_ptr ^= HMAC_OPAD_VALUE;
1446         }
1447
1448         /* do partial hash of ipad and copy to state1 */
1449         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
1450                 memset(ipad, 0, block_size);
1451                 memset(opad, 0, block_size);
1452                 QAT_LOG(ERR, "ipad precompute failed");
1453                 return -EFAULT;
1454         }
1455
1456         /*
1457          * State len is a multiple of 8, so may be larger than the digest.
1458          * Put the partial hash of opad state_len bytes after state1
1459          */
1460         *p_state_len = qat_hash_get_state1_size(hash_alg);
1461         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
1462                 memset(ipad, 0, block_size);
1463                 memset(opad, 0, block_size);
1464                 QAT_LOG(ERR, "opad precompute failed");
1465                 return -EFAULT;
1466         }
1467
1468         /*  don't leave data lying around */
1469         memset(ipad, 0, block_size);
1470         memset(opad, 0, block_size);
1471         return 0;
1472 }
1473
1474 static void
1475 qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
1476                 enum qat_sym_proto_flag proto_flags)
1477 {
1478         header->hdr_flags =
1479                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
1480         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
1481         header->comn_req_flags =
1482                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
1483                                         QAT_COMN_PTR_TYPE_FLAT);
1484         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
1485                                   ICP_QAT_FW_LA_PARTIAL_NONE);
1486         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
1487                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
1488
1489         switch (proto_flags)            {
1490         case QAT_CRYPTO_PROTO_FLAG_NONE:
1491                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1492                                         ICP_QAT_FW_LA_NO_PROTO);
1493                 break;
1494         case QAT_CRYPTO_PROTO_FLAG_CCM:
1495                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1496                                         ICP_QAT_FW_LA_CCM_PROTO);
1497                 break;
1498         case QAT_CRYPTO_PROTO_FLAG_GCM:
1499                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1500                                         ICP_QAT_FW_LA_GCM_PROTO);
1501                 break;
1502         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
1503                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
1504                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
1505                 break;
1506         case QAT_CRYPTO_PROTO_FLAG_ZUC:
1507                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
1508                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
1509                 break;
1510         }
1511
1512         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
1513                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
1514         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
1515                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
1516 }
1517
1518 /*
1519  *      Snow3G and ZUC should never use this function
1520  *      and set its protocol flag in both cipher and auth part of content
1521  *      descriptor building function
1522  */
1523 static enum qat_sym_proto_flag
1524 qat_get_crypto_proto_flag(uint16_t flags)
1525 {
1526         int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
1527         enum qat_sym_proto_flag qat_proto_flag =
1528                         QAT_CRYPTO_PROTO_FLAG_NONE;
1529
1530         switch (proto) {
1531         case ICP_QAT_FW_LA_GCM_PROTO:
1532                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1533                 break;
1534         case ICP_QAT_FW_LA_CCM_PROTO:
1535                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1536                 break;
1537         }
1538
1539         return qat_proto_flag;
1540 }
1541
/*
 * Build the cipher portion of a session's content descriptor (CD) and
 * fill the matching fields of the firmware request template.
 *
 * @cdesc        session whose fw_req / cd are populated; cd_cur_ptr is
 *               advanced past the cipher config + key material written here
 * @cipherkey    cipher key bytes to embed in the CD
 * @cipherkeylen length of @cipherkey in bytes
 *
 * Returns 0 on success, -EFAULT if the session command is not a cipher
 * (or combined cipher/hash) command.
 *
 * NOTE(review): for CIPHER_HASH the cipher part is written first and the
 * auth part is appended later by qat_sym_cd_auth_set — statement order
 * and cd_cur_ptr arithmetic here are load-bearing.
 */
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_cipher_algo_blk20 *cipher20;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cd_ctrl is viewed as cipher or auth control header via 'ptr' */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	enum qat_sym_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex  = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* Cipher-only chain: cipher slice -> DRAM write-back */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* Cipher-then-hash chain: cipher -> auth -> DRAM write-back */
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* HASH_CIPHER falls through: auth side set up the CD already */
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		/* decrypt with block ciphers: firmware converts the key */
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/*
	 * Per-algorithm key/state sizing. total_key_size may exceed
	 * cipherkeylen (e.g. appended IV space, 3DES key expansion);
	 * the shortfall is padded after the key copy below.
	 */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		/* default path (AES and others): key size as given */
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	}
	/* offset of the cipher config within the CD, in 8-byte words */
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_sym_session_init_common_hdr(header, qat_proto_flag);

	/* cipher/cipher20 alias the same CD location; which layout is
	 * written depends on the branches below (cipher20 for UCS devices)
	 */
	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		/* KASUMI F8 stores two keys back-to-back: the key itself
		 * and a copy XORed with the F8 modifier
		 */
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else if (cdesc->is_ucs) {
		/* UCS (gen-2.0 cipher slice) layout: wider config block,
		 * key rounded up to an AES-128 key multiple
		 */
		const uint8_t *final_key = cipherkey;

		total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
			ICP_QAT_HW_AES_128_KEY_SZ);
		cipher20->cipher_config.reserved[0] = 0;
		cipher20->cipher_config.reserved[1] = 0;
		cipher20->cipher_config.reserved[2] = 0;

		rte_memcpy(cipher20->key, final_key, cipherkeylen);
		cdesc->cd_cur_ptr +=
			sizeof(struct icp_qat_hw_ucs_cipher_config) +
					cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size =  total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided so use K1 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided so use K1 = K2 = K3*/
			memcpy(cdesc->cd_cur_ptr, cipherkey,
				cipherkeylen);
			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
				cipherkey, cipherkeylen);
		} else
			/* zero-pad (e.g. the IV area for SNOW3G/ZUC) */
			memset(cdesc->cd_cur_ptr, 0, padding_size);

		cdesc->cd_cur_ptr += padding_size;
	}
	/* CD size in 8-byte words, rounded up */
	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;

	return 0;
}
1711
1712 int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
1713                                                 const uint8_t *authkey,
1714                                                 uint32_t authkeylen,
1715                                                 uint32_t aad_length,
1716                                                 uint32_t digestsize,
1717                                                 unsigned int operation)
1718 {
1719         struct icp_qat_hw_auth_setup *hash;
1720         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1721         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1722         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1723         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1724         void *ptr = &req_tmpl->cd_ctrl;
1725         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1726         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1727         struct icp_qat_fw_la_auth_req_params *auth_param =
1728                 (struct icp_qat_fw_la_auth_req_params *)
1729                 ((char *)&req_tmpl->serv_specif_rqpars +
1730                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1731         uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1732         uint16_t hash_offset, cd_size;
1733         uint32_t *aad_len = NULL;
1734         uint32_t wordIndex  = 0;
1735         uint32_t *pTempKey;
1736         enum qat_sym_proto_flag qat_proto_flag =
1737                 QAT_CRYPTO_PROTO_FLAG_NONE;
1738
1739         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1740                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1741                                         ICP_QAT_FW_SLICE_AUTH);
1742                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1743                                         ICP_QAT_FW_SLICE_DRAM_WR);
1744                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1745         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1746                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1747                                 ICP_QAT_FW_SLICE_AUTH);
1748                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1749                                 ICP_QAT_FW_SLICE_CIPHER);
1750                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1751                                 ICP_QAT_FW_SLICE_CIPHER);
1752                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1753                                 ICP_QAT_FW_SLICE_DRAM_WR);
1754                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1755         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1756                 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1757                 return -EFAULT;
1758         }
1759
1760         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1761                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1762                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1763                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1764                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
1765                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1766         } else {
1767                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1768                                            ICP_QAT_FW_LA_RET_AUTH_RES);
1769                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1770                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1771                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1772         }
1773
1774         /*
1775          * Setup the inner hash config
1776          */
1777         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1778         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1779         hash->auth_config.reserved = 0;
1780         hash->auth_config.config =
1781                         ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1782                                 cdesc->qat_hash_alg, digestsize);
1783
1784         if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1785                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1786                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1787                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1788                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1789                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1790                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1791                         )
1792                 hash->auth_counter.counter = 0;
1793         else {
1794                 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1795
1796                 if (block_size < 0)
1797                         return block_size;
1798                 hash->auth_counter.counter = rte_bswap32(block_size);
1799         }
1800
1801         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1802
1803         /*
1804          * cd_cur_ptr now points at the state1 information.
1805          */
1806         switch (cdesc->qat_hash_alg) {
1807         case ICP_QAT_HW_AUTH_ALGO_SHA1:
1808                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1809                         /* Plain SHA-1 */
1810                         rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1811                                         sizeof(sha1InitialState));
1812                         state1_size = qat_hash_get_state1_size(
1813                                         cdesc->qat_hash_alg);
1814                         break;
1815                 }
1816                 /* SHA-1 HMAC */
1817                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1818                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1819                         cdesc->aes_cmac)) {
1820                         QAT_LOG(ERR, "(SHA)precompute failed");
1821                         return -EFAULT;
1822                 }
1823                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1824                 break;
1825         case ICP_QAT_HW_AUTH_ALGO_SHA224:
1826                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1827                         /* Plain SHA-224 */
1828                         rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1829                                         sizeof(sha224InitialState));
1830                         state1_size = qat_hash_get_state1_size(
1831                                         cdesc->qat_hash_alg);
1832                         break;
1833                 }
1834                 /* SHA-224 HMAC */
1835                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1836                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1837                         cdesc->aes_cmac)) {
1838                         QAT_LOG(ERR, "(SHA)precompute failed");
1839                         return -EFAULT;
1840                 }
1841                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1842                 break;
1843         case ICP_QAT_HW_AUTH_ALGO_SHA256:
1844                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1845                         /* Plain SHA-256 */
1846                         rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1847                                         sizeof(sha256InitialState));
1848                         state1_size = qat_hash_get_state1_size(
1849                                         cdesc->qat_hash_alg);
1850                         break;
1851                 }
1852                 /* SHA-256 HMAC */
1853                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1854                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1855                         cdesc->aes_cmac)) {
1856                         QAT_LOG(ERR, "(SHA)precompute failed");
1857                         return -EFAULT;
1858                 }
1859                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1860                 break;
1861         case ICP_QAT_HW_AUTH_ALGO_SHA384:
1862                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1863                         /* Plain SHA-384 */
1864                         rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1865                                         sizeof(sha384InitialState));
1866                         state1_size = qat_hash_get_state1_size(
1867                                         cdesc->qat_hash_alg);
1868                         break;
1869                 }
1870                 /* SHA-384 HMAC */
1871                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1872                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1873                         cdesc->aes_cmac)) {
1874                         QAT_LOG(ERR, "(SHA)precompute failed");
1875                         return -EFAULT;
1876                 }
1877                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1878                 break;
1879         case ICP_QAT_HW_AUTH_ALGO_SHA512:
1880                 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1881                         /* Plain SHA-512 */
1882                         rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1883                                         sizeof(sha512InitialState));
1884                         state1_size = qat_hash_get_state1_size(
1885                                         cdesc->qat_hash_alg);
1886                         break;
1887                 }
1888                 /* SHA-512 HMAC */
1889                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1890                         authkeylen, cdesc->cd_cur_ptr,  &state1_size,
1891                         cdesc->aes_cmac)) {
1892                         QAT_LOG(ERR, "(SHA)precompute failed");
1893                         return -EFAULT;
1894                 }
1895                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1896                 break;
1897         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1898                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1899
1900                 if (cdesc->aes_cmac)
1901                         memset(cdesc->cd_cur_ptr, 0, state1_size);
1902                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1903                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1904                         &state2_size, cdesc->aes_cmac)) {
1905                         cdesc->aes_cmac ? QAT_LOG(ERR,
1906                                                   "(CMAC)precompute failed")
1907                                         : QAT_LOG(ERR,
1908                                                   "(XCBC)precompute failed");
1909                         return -EFAULT;
1910                 }
1911                 break;
1912         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1913         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1914                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1915                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1916                 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1917                         authkeylen, cdesc->cd_cur_ptr + state1_size,
1918                         &state2_size, cdesc->aes_cmac)) {
1919                         QAT_LOG(ERR, "(GCM)precompute failed");
1920                         return -EFAULT;
1921                 }
1922                 /*
1923                  * Write (the length of AAD) into bytes 16-19 of state2
1924                  * in big-endian format. This field is 8 bytes
1925                  */
1926                 auth_param->u2.aad_sz =
1927                                 RTE_ALIGN_CEIL(aad_length, 16);
1928                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1929
1930                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1931                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1932                                         ICP_QAT_HW_GALOIS_H_SZ);
1933                 *aad_len = rte_bswap32(aad_length);
1934                 cdesc->aad_len = aad_length;
1935                 break;
1936         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1937                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1938                 state1_size = qat_hash_get_state1_size(
1939                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1940                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1941                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1942
1943                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1944                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
1945                 cipherconfig->cipher_config.val =
1946                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1947                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1948                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
1949                         ICP_QAT_HW_CIPHER_ENCRYPT);
1950                 memcpy(cipherconfig->key, authkey, authkeylen);
1951                 memset(cipherconfig->key + authkeylen,
1952                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1953                 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1954                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1955                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1956                 break;
1957         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1958                 hash->auth_config.config =
1959                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1960                                 cdesc->qat_hash_alg, digestsize);
1961                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1962                 state1_size = qat_hash_get_state1_size(
1963                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1964                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1965                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1966                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1967
1968                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1969                 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1970                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1971                 cdesc->min_qat_dev_gen = QAT_GEN2;
1972
1973                 break;
1974         case ICP_QAT_HW_AUTH_ALGO_MD5:
1975                 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
1976                         authkeylen, cdesc->cd_cur_ptr, &state1_size,
1977                         cdesc->aes_cmac)) {
1978                         QAT_LOG(ERR, "(MD5)precompute failed");
1979                         return -EFAULT;
1980                 }
1981                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
1982                 break;
1983         case ICP_QAT_HW_AUTH_ALGO_NULL:
1984                 state1_size = qat_hash_get_state1_size(
1985                                 ICP_QAT_HW_AUTH_ALGO_NULL);
1986                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
1987                 break;
1988         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1989                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1990                 state1_size = qat_hash_get_state1_size(
1991                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
1992                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
1993                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
1994
1995                 if (aad_length > 0) {
1996                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
1997                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
1998                         auth_param->u2.aad_sz =
1999                         RTE_ALIGN_CEIL(aad_length,
2000                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
2001                 } else {
2002                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
2003                 }
2004                 cdesc->aad_len = aad_length;
2005                 hash->auth_counter.counter = 0;
2006
2007                 hash_cd_ctrl->outer_prefix_sz = digestsize;
2008                 auth_param->hash_state_sz = digestsize;
2009
2010                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2011                 break;
2012         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
2013                 state1_size = qat_hash_get_state1_size(
2014                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
2015                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
2016                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
2017                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
2018                                                         + authkeylen);
2019                 /*
2020                 * The Inner Hash Initial State2 block must contain IK
2021                 * (Initialisation Key), followed by IK XOR-ed with KM
2022                 * (Key Modifier): IK||(IK^KM).
2023                 */
2024                 /* write the auth key */
2025                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
2026                 /* initialise temp key with auth key */
2027                 memcpy(pTempKey, authkey, authkeylen);
2028                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
2029                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
2030                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
2031                 break;
2032         default:
2033                 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
2034                 return -EFAULT;
2035         }
2036
2037         /* Request template setup */
2038         qat_sym_session_init_common_hdr(header, qat_proto_flag);
2039         header->service_cmd_id = cdesc->qat_cmd;
2040
2041         /* Auth CD config setup */
2042         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
2043         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
2044         hash_cd_ctrl->inner_res_sz = digestsize;
2045         hash_cd_ctrl->final_sz = digestsize;
2046         hash_cd_ctrl->inner_state1_sz = state1_size;
2047         auth_param->auth_res_sz = digestsize;
2048
2049         hash_cd_ctrl->inner_state2_sz  = state2_size;
2050         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
2051                         ((sizeof(struct icp_qat_hw_auth_setup) +
2052                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
2053                                         >> 3);
2054
2055         cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
2056         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
2057
2058         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
2059         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
2060
2061         return 0;
2062 }
2063
2064 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2065 {
2066         switch (key_len) {
2067         case ICP_QAT_HW_AES_128_KEY_SZ:
2068                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2069                 break;
2070         case ICP_QAT_HW_AES_192_KEY_SZ:
2071                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2072                 break;
2073         case ICP_QAT_HW_AES_256_KEY_SZ:
2074                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2075                 break;
2076         default:
2077                 return -EINVAL;
2078         }
2079         return 0;
2080 }
2081
2082 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2083                 enum icp_qat_hw_cipher_algo *alg)
2084 {
2085         switch (key_len) {
2086         case ICP_QAT_HW_AES_128_KEY_SZ:
2087                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2088                 break;
2089         case ICP_QAT_HW_AES_256_KEY_SZ:
2090                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2091                 break;
2092         default:
2093                 return -EINVAL;
2094         }
2095         return 0;
2096 }
2097
2098 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2099 {
2100         switch (key_len) {
2101         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2102                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2103                 break;
2104         default:
2105                 return -EINVAL;
2106         }
2107         return 0;
2108 }
2109
2110 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2111 {
2112         switch (key_len) {
2113         case ICP_QAT_HW_KASUMI_KEY_SZ:
2114                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2115                 break;
2116         default:
2117                 return -EINVAL;
2118         }
2119         return 0;
2120 }
2121
2122 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2123 {
2124         switch (key_len) {
2125         case ICP_QAT_HW_DES_KEY_SZ:
2126                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2127                 break;
2128         default:
2129                 return -EINVAL;
2130         }
2131         return 0;
2132 }
2133
2134 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2135 {
2136         switch (key_len) {
2137         case QAT_3DES_KEY_SZ_OPT1:
2138         case QAT_3DES_KEY_SZ_OPT2:
2139         case QAT_3DES_KEY_SZ_OPT3:
2140                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2141                 break;
2142         default:
2143                 return -EINVAL;
2144         }
2145         return 0;
2146 }
2147
2148 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2149 {
2150         switch (key_len) {
2151         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2152                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2153                 break;
2154         default:
2155                 return -EINVAL;
2156         }
2157         return 0;
2158 }
2159
2160 #ifdef RTE_LIB_SECURITY
2161 static int
2162 qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
2163 {
2164         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
2165         struct rte_security_docsis_xform *docsis = &conf->docsis;
2166
2167         /* CRC generate -> Cipher encrypt */
2168         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
2169
2170                 if (crypto_sym != NULL &&
2171                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2172                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
2173                     crypto_sym->cipher.algo ==
2174                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2175                     (crypto_sym->cipher.key.length ==
2176                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2177                      crypto_sym->cipher.key.length ==
2178                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2179                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2180                     crypto_sym->next == NULL) {
2181                         return 0;
2182                 }
2183         /* Cipher decrypt -> CRC verify */
2184         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
2185
2186                 if (crypto_sym != NULL &&
2187                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2188                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
2189                     crypto_sym->cipher.algo ==
2190                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
2191                     (crypto_sym->cipher.key.length ==
2192                                         ICP_QAT_HW_AES_128_KEY_SZ ||
2193                      crypto_sym->cipher.key.length ==
2194                                         ICP_QAT_HW_AES_256_KEY_SZ) &&
2195                     crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
2196                     crypto_sym->next == NULL) {
2197                         return 0;
2198                 }
2199         }
2200
2201         return -EINVAL;
2202 }
2203
2204 static int
2205 qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
2206                 struct rte_security_session_conf *conf, void *session_private)
2207 {
2208         int ret;
2209         int qat_cmd_id;
2210         struct rte_crypto_sym_xform *xform = NULL;
2211         struct qat_sym_session *session = session_private;
2212
2213         /* Clear the session */
2214         memset(session, 0, qat_sym_session_get_private_size(dev));
2215
2216         ret = qat_sec_session_check_docsis(conf);
2217         if (ret) {
2218                 QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
2219                 return ret;
2220         }
2221
2222         xform = conf->crypto_xform;
2223
2224         /* Verify the session physical address is known */
2225         rte_iova_t session_paddr = rte_mempool_virt2iova(session);
2226         if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
2227                 QAT_LOG(ERR,
2228                         "Session physical address unknown. Bad memory pool.");
2229                 return -EINVAL;
2230         }
2231
2232         /* Set context descriptor physical address */
2233         session->cd_paddr = session_paddr +
2234                         offsetof(struct qat_sym_session, cd);
2235
2236         session->min_qat_dev_gen = QAT_GEN1;
2237
2238         /* Get requested QAT command id - should be cipher */
2239         qat_cmd_id = qat_get_cmd_id(xform);
2240         if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
2241                 QAT_LOG(ERR, "Unsupported xform chain requested");
2242                 return -ENOTSUP;
2243         }
2244         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
2245
2246         ret = qat_sym_session_configure_cipher(dev, xform, session);
2247         if (ret < 0)
2248                 return ret;
2249
2250         return 0;
2251 }
2252
2253 int
2254 qat_security_session_create(void *dev,
2255                                 struct rte_security_session_conf *conf,
2256                                 struct rte_security_session *sess,
2257                                 struct rte_mempool *mempool)
2258 {
2259         void *sess_private_data;
2260         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2261         int ret;
2262
2263         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2264                         conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2265                 QAT_LOG(ERR, "Invalid security protocol");
2266                 return -EINVAL;
2267         }
2268
2269         if (rte_mempool_get(mempool, &sess_private_data)) {
2270                 QAT_LOG(ERR, "Couldn't get object from session mempool");
2271                 return -ENOMEM;
2272         }
2273
2274         ret = qat_sec_session_set_docsis_parameters(cdev, conf,
2275                         sess_private_data);
2276         if (ret != 0) {
2277                 QAT_LOG(ERR, "Failed to configure session parameters");
2278                 /* Return session to mempool */
2279                 rte_mempool_put(mempool, sess_private_data);
2280                 return ret;
2281         }
2282
2283         set_sec_session_private_data(sess, sess_private_data);
2284
2285         return ret;
2286 }
2287
2288 int
2289 qat_security_session_destroy(void *dev __rte_unused,
2290                                  struct rte_security_session *sess)
2291 {
2292         void *sess_priv = get_sec_session_private_data(sess);
2293         struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
2294
2295         if (sess_priv) {
2296                 if (s->bpi_ctx)
2297                         bpi_cipher_ctx_free(s->bpi_ctx);
2298                 memset(s, 0, qat_sym_session_get_private_size(dev));
2299                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2300
2301                 set_sec_session_private_data(sess, NULL);
2302                 rte_mempool_put(sess_mp, sess_priv);
2303         }
2304         return 0;
2305 }
2306 #endif