drivers/crypto/qat/qat_crypto.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2018 Intel Corporation
3  */
4
5 #include <rte_mempool.h>
6 #include <rte_mbuf.h>
7 #include <rte_hexdump.h>
8 #include <rte_crypto_sym.h>
9 #include <rte_byteorder.h>
10 #include <rte_pci.h>
11 #include <rte_bus_pci.h>
12
13 #include <openssl/evp.h>
14
15 #include "qat_logs.h"
16 #include "qat_algs.h"
17 #include "qat_crypto.h"
18 #include "adf_transport_access_macros.h"
19
20 #define BYTE_LENGTH    8
21 /* BPI is only used for partial blocks of DES and AES,
22  * so the AES block length can be taken as the max length for iv, src and dst
23  */
24 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
25
26 static int
27 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
28                 struct qat_pmd_private *internals) {
29         int i = 0;
30         const struct rte_cryptodev_capabilities *capability;
31
32         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
33                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
34                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
35                         continue;
36
37                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
38                         continue;
39
40                 if (capability->sym.cipher.algo == algo)
41                         return 1;
42         }
43         return 0;
44 }
45
46 static int
47 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
48                 struct qat_pmd_private *internals) {
49         int i = 0;
50         const struct rte_cryptodev_capabilities *capability;
51
52         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
53                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
54                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
55                         continue;
56
57                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
58                         continue;
59
60                 if (capability->sym.auth.algo == algo)
61                         return 1;
62         }
63         return 0;
64 }
65
66 /** Encrypt a single partial block
67  *  Depends on openssl libcrypto
68  *  Uses ECB+XOR to do CFB encryption, same result, more performant
69  */
70 static inline int
71 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
72                 uint8_t *iv, int ivlen, int srclen,
73                 void *bpi_ctx)
74 {
75         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
76         int encrypted_ivlen;
77         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
78         uint8_t *encr = encrypted_iv;
79
80         /* ECB method: encrypt the IV, then XOR this with plaintext */
81         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
82                                                                 <= 0)
83                 goto cipher_encrypt_err;
84
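           /* XOR the ECB-encrypted IV (the keystream) into the source bytes;
            * for a single partial block this is exactly CFB encryption.
            */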
85         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
86                 *dst = *src ^ *encr;
87
88         return 0;
89
90 cipher_encrypt_err:
91         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
92         return -EINVAL;
93 }
94
95 /** Decrypt a single partial block
96  *  Depends on openssl libcrypto
97  *  Uses ECB+XOR to do CFB decryption, same result, more performant
98  */
99 static inline int
100 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
101                 uint8_t *iv, int ivlen, int srclen,
102                 void *bpi_ctx)
103 {
104         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
105         int encrypted_ivlen;
106         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
107         uint8_t *encr = encrypted_iv;
108
109         /* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
110         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
111                                                                 <= 0)
112                 goto cipher_decrypt_err;
113
114         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
115                 *dst = *src ^ *encr;
116
117         return 0;
118
119 cipher_decrypt_err:
120         PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
121         return -EINVAL;
122 }
123
124 /** Creates a context in either AES or DES in ECB mode
125  *  Depends on openssl libcrypto
126  */
127 static int
128 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
129                 enum rte_crypto_cipher_operation direction __rte_unused,
130                 uint8_t *key, void **ctx)
131 {
132         const EVP_CIPHER *algo = NULL;
133         int ret;
134         *ctx = EVP_CIPHER_CTX_new();
135
136         if (*ctx == NULL) {
137                 ret = -ENOMEM;
138                 goto ctx_init_err;
139         }
140
141         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
142                 algo = EVP_des_ecb();
143         else
144                 algo = EVP_aes_128_ecb();
145
146         /* IV will be ECB encrypted whether direction is encrypt or decrypt */
147         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
148                 ret = -EINVAL;
149                 goto ctx_init_err;
150         }
151
152         return 0;
153
154 ctx_init_err:
155         if (*ctx != NULL)
156                 EVP_CIPHER_CTX_free(*ctx);
157         return ret;
158 }
159
160 /** Frees a context previously created
161  *  Depends on openssl libcrypto
162  */
163 static void
164 bpi_cipher_ctx_free(void *bpi_ctx)
165 {
166         if (bpi_ctx != NULL)
167                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
168 }
169
170 static inline uint32_t
171 adf_modulo(uint32_t data, uint32_t shift);
172
173 static inline int
174 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
175                 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
176
177 void
178 qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
179                 struct rte_cryptodev_sym_session *sess)
180 {
181         PMD_INIT_FUNC_TRACE();
182         uint8_t index = dev->driver_id;
183         void *sess_priv = get_session_private_data(sess, index);
184         struct qat_session *s = (struct qat_session *)sess_priv;
185
186         if (sess_priv) {
187                 if (s->bpi_ctx)
188                         bpi_cipher_ctx_free(s->bpi_ctx);
189                 memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
190                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
191                 set_session_private_data(sess, index, NULL);
192                 rte_mempool_put(sess_mp, sess_priv);
193         }
194 }
195
196 static int
197 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
198 {
199         /* Cipher Only */
200         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
201                 return ICP_QAT_FW_LA_CMD_CIPHER;
202
203         /* Authentication Only */
204         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
205                 return ICP_QAT_FW_LA_CMD_AUTH;
206
207         /* AEAD */
208         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
209                 /* AES-GCM and AES-CCM work in opposite orders:
210                  * GCM encrypts first and then generates the hash, whereas
211                  * AES-CCM generates the hash first and then encrypts. A
212                  * similar relation applies to decryption.
213                  */
214                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
215                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
216                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
217                         else
218                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
219                 else
220                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
221                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
222                         else
223                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
224         }
225
226         if (xform->next == NULL)
227                 return -1;
228
229         /* Cipher then Authenticate */
230         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
231                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
232                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
233
234         /* Authenticate then Cipher */
235         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
236                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
237                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
238
239         return -1;
240 }
241
242 static struct rte_crypto_auth_xform *
243 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
244 {
245         do {
246                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
247                         return &xform->auth;
248
249                 xform = xform->next;
250         } while (xform);
251
252         return NULL;
253 }
254
255 static struct rte_crypto_cipher_xform *
256 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
257 {
258         do {
259                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
260                         return &xform->cipher;
261
262                 xform = xform->next;
263         } while (xform);
264
265         return NULL;
266 }
267
268 int
269 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
270                 struct rte_crypto_sym_xform *xform,
271                 struct qat_session *session)
272 {
273         struct qat_pmd_private *internals = dev->data->dev_private;
274         struct rte_crypto_cipher_xform *cipher_xform = NULL;
275         int ret;
276
277         /* Get cipher xform from crypto xform chain */
278         cipher_xform = qat_get_cipher_xform(xform);
279
280         session->cipher_iv.offset = cipher_xform->iv.offset;
281         session->cipher_iv.length = cipher_xform->iv.length;
282
283         switch (cipher_xform->algo) {
284         case RTE_CRYPTO_CIPHER_AES_CBC:
285                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
286                                 &session->qat_cipher_alg) != 0) {
287                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
288                         ret = -EINVAL;
289                         goto error_out;
290                 }
291                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
292                 break;
293         case RTE_CRYPTO_CIPHER_AES_CTR:
294                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
295                                 &session->qat_cipher_alg) != 0) {
296                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
297                         ret = -EINVAL;
298                         goto error_out;
299                 }
300                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
301                 break;
302         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
303                 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
304                                         &session->qat_cipher_alg) != 0) {
305                         PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
306                         ret = -EINVAL;
307                         goto error_out;
308                 }
309                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
310                 break;
311         case RTE_CRYPTO_CIPHER_NULL:
312                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
313                 break;
314         case RTE_CRYPTO_CIPHER_KASUMI_F8:
315                 if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
316                                         &session->qat_cipher_alg) != 0) {
317                         PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
318                         ret = -EINVAL;
319                         goto error_out;
320                 }
321                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
322                 break;
323         case RTE_CRYPTO_CIPHER_3DES_CBC:
324                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
325                                 &session->qat_cipher_alg) != 0) {
326                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
327                         ret = -EINVAL;
328                         goto error_out;
329                 }
330                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
331                 break;
332         case RTE_CRYPTO_CIPHER_DES_CBC:
333                 if (qat_alg_validate_des_key(cipher_xform->key.length,
334                                 &session->qat_cipher_alg) != 0) {
335                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
336                         ret = -EINVAL;
337                         goto error_out;
338                 }
339                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
340                 break;
341         case RTE_CRYPTO_CIPHER_3DES_CTR:
342                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
343                                 &session->qat_cipher_alg) != 0) {
344                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
345                         ret = -EINVAL;
346                         goto error_out;
347                 }
348                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
349                 break;
350         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
351                 ret = bpi_cipher_ctx_init(
352                                         cipher_xform->algo,
353                                         cipher_xform->op,
354                                         cipher_xform->key.data,
355                                         &session->bpi_ctx);
356                 if (ret != 0) {
357                         PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
358                         goto error_out;
359                 }
360                 if (qat_alg_validate_des_key(cipher_xform->key.length,
361                                 &session->qat_cipher_alg) != 0) {
362                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
363                         ret = -EINVAL;
364                         goto error_out;
365                 }
366                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
367                 break;
368         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
369                 ret = bpi_cipher_ctx_init(
370                                         cipher_xform->algo,
371                                         cipher_xform->op,
372                                         cipher_xform->key.data,
373                                         &session->bpi_ctx);
374                 if (ret != 0) {
375                         PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
376                         goto error_out;
377                 }
378                 if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
379                                 &session->qat_cipher_alg) != 0) {
380                         PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
381                         ret = -EINVAL;
382                         goto error_out;
383                 }
384                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
385                 break;
386         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
387                 if (!qat_is_cipher_alg_supported(
388                         cipher_xform->algo, internals)) {
389                         PMD_DRV_LOG(ERR, "%s not supported on this device",
390                                 rte_crypto_cipher_algorithm_strings
391                                         [cipher_xform->algo]);
392                         ret = -ENOTSUP;
393                         goto error_out;
394                 }
395                 if (qat_alg_validate_zuc_key(cipher_xform->key.length,
396                                 &session->qat_cipher_alg) != 0) {
397                         PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
398                         ret = -EINVAL;
399                         goto error_out;
400                 }
401                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
402                 break;
403         case RTE_CRYPTO_CIPHER_3DES_ECB:
404         case RTE_CRYPTO_CIPHER_AES_ECB:
405         case RTE_CRYPTO_CIPHER_AES_F8:
406         case RTE_CRYPTO_CIPHER_AES_XTS:
407         case RTE_CRYPTO_CIPHER_ARC4:
408                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
409                                 cipher_xform->algo);
410                 ret = -ENOTSUP;
411                 goto error_out;
412         default:
413                 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
414                                 cipher_xform->algo);
415                 ret = -EINVAL;
416                 goto error_out;
417         }
418
419         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
420                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
421         else
422                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
423
424         if (qat_alg_aead_session_create_content_desc_cipher(session,
425                                                 cipher_xform->key.data,
426                                                 cipher_xform->key.length)) {
427                 ret = -EINVAL;
428                 goto error_out;
429         }
430
431         return 0;
432
433 error_out:
434         if (session->bpi_ctx) {
435                 bpi_cipher_ctx_free(session->bpi_ctx);
436                 session->bpi_ctx = NULL;
437         }
438         return ret;
439 }
440
441 int
442 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
443                 struct rte_crypto_sym_xform *xform,
444                 struct rte_cryptodev_sym_session *sess,
445                 struct rte_mempool *mempool)
446 {
447         void *sess_private_data;
448         int ret;
449
450         if (rte_mempool_get(mempool, &sess_private_data)) {
451                 CDEV_LOG_ERR(
452                         "Couldn't get object from session mempool");
453                 return -ENOMEM;
454         }
455
456         ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
457         if (ret != 0) {
458                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
459                                 "session parameters");
460
461                 /* Return session to mempool */
462                 rte_mempool_put(mempool, sess_private_data);
463                 return ret;
464         }
465
466         set_session_private_data(sess, dev->driver_id,
467                 sess_private_data);
468
469         return 0;
470 }
471
472 int
473 qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
474                 struct rte_crypto_sym_xform *xform, void *session_private)
475 {
476         struct qat_session *session = session_private;
477         int ret;
478
479         int qat_cmd_id;
480         PMD_INIT_FUNC_TRACE();
481
482         /* Set context descriptor physical address */
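            /* (The session object is allocated from a mempool, so
             * rte_mempool_virt2iova() gives its IOVA; the content
             * descriptor (cd) is embedded in the session structure.)
             */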
483         session->cd_paddr = rte_mempool_virt2iova(session) +
484                         offsetof(struct qat_session, cd);
485
486         session->min_qat_dev_gen = QAT_GEN1;
487
488         /* Get requested QAT command id */
489         qat_cmd_id = qat_get_cmd_id(xform);
490         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
491                 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
492                 return -ENOTSUP;
493         }
494         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
495         switch (session->qat_cmd) {
496         case ICP_QAT_FW_LA_CMD_CIPHER:
497                 ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
498                 if (ret < 0)
499                         return ret;
500                 break;
501         case ICP_QAT_FW_LA_CMD_AUTH:
502                 ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
503                 if (ret < 0)
504                         return ret;
505                 break;
506         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
507                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
508                         ret = qat_crypto_sym_configure_session_aead(xform,
509                                         session);
510                         if (ret < 0)
511                                 return ret;
512                 } else {
513                         ret = qat_crypto_sym_configure_session_cipher(dev,
514                                         xform, session);
515                         if (ret < 0)
516                                 return ret;
517                         ret = qat_crypto_sym_configure_session_auth(dev,
518                                         xform, session);
519                         if (ret < 0)
520                                 return ret;
521                 }
522                 break;
523         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
524                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
525                         ret = qat_crypto_sym_configure_session_aead(xform,
526                                         session);
527                         if (ret < 0)
528                                 return ret;
529                 } else {
530                         ret = qat_crypto_sym_configure_session_auth(dev,
531                                         xform, session);
532                         if (ret < 0)
533                                 return ret;
534                         ret = qat_crypto_sym_configure_session_cipher(dev,
535                                         xform, session);
536                         if (ret < 0)
537                                 return ret;
538                 }
539                 break;
540         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
541         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
542         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
543         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
544         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
545         case ICP_QAT_FW_LA_CMD_MGF1:
546         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
547         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
548         case ICP_QAT_FW_LA_CMD_DELIMITER:
549                 PMD_DRV_LOG(ERR, "Unsupported Service %u",
550                                 session->qat_cmd);
551                 return -ENOTSUP;
552         default:
553                 PMD_DRV_LOG(ERR, "Unsupported Service %u",
554                                 session->qat_cmd);
555                 return -ENOTSUP;
556         }
557
558         return 0;
559 }
560
561 int
562 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
563                                 struct rte_crypto_sym_xform *xform,
564                                 struct qat_session *session)
565 {
566         struct rte_crypto_auth_xform *auth_xform = NULL;
567         struct qat_pmd_private *internals = dev->data->dev_private;
568         auth_xform = qat_get_auth_xform(xform);
569         uint8_t *key_data = auth_xform->key.data;
570         uint8_t key_length = auth_xform->key.length;
571
572         switch (auth_xform->algo) {
573         case RTE_CRYPTO_AUTH_SHA1_HMAC:
574                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
575                 break;
576         case RTE_CRYPTO_AUTH_SHA224_HMAC:
577                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
578                 break;
579         case RTE_CRYPTO_AUTH_SHA256_HMAC:
580                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
581                 break;
582         case RTE_CRYPTO_AUTH_SHA384_HMAC:
583                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
584                 break;
585         case RTE_CRYPTO_AUTH_SHA512_HMAC:
586                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
587                 break;
588         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
589                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
590                 break;
591         case RTE_CRYPTO_AUTH_AES_GMAC:
592                 if (qat_alg_validate_aes_key(auth_xform->key.length,
593                                 &session->qat_cipher_alg) != 0) {
594                         PMD_DRV_LOG(ERR, "Invalid AES key size");
595                         return -EINVAL;
596                 }
597                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
598                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
599
600                 break;
601         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
602                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
603                 break;
604         case RTE_CRYPTO_AUTH_MD5_HMAC:
605                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
606                 break;
607         case RTE_CRYPTO_AUTH_NULL:
608                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
609                 break;
610         case RTE_CRYPTO_AUTH_KASUMI_F9:
611                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
612                 break;
613         case RTE_CRYPTO_AUTH_ZUC_EIA3:
614                 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
615                         PMD_DRV_LOG(ERR, "%s not supported on this device",
616                                 rte_crypto_auth_algorithm_strings
617                                 [auth_xform->algo]);
618                         return -ENOTSUP;
619                 }
620                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
621                 break;
622         case RTE_CRYPTO_AUTH_SHA1:
623         case RTE_CRYPTO_AUTH_SHA256:
624         case RTE_CRYPTO_AUTH_SHA512:
625         case RTE_CRYPTO_AUTH_SHA224:
626         case RTE_CRYPTO_AUTH_SHA384:
627         case RTE_CRYPTO_AUTH_MD5:
628         case RTE_CRYPTO_AUTH_AES_CMAC:
629         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
630                 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
631                                 auth_xform->algo);
632                 return -ENOTSUP;
633         default:
634                 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
635                                 auth_xform->algo);
636                 return -EINVAL;
637         }
638
639         session->auth_iv.offset = auth_xform->iv.offset;
640         session->auth_iv.length = auth_xform->iv.length;
641
642         if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
643                 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
644                         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
645                         session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
646                         /*
647                          * The cipher descriptor content must be created
648                          * first, then the authentication content
649                          */
650                         if (qat_alg_aead_session_create_content_desc_cipher(session,
651                                                 auth_xform->key.data,
652                                                 auth_xform->key.length))
653                                 return -EINVAL;
654
655                         if (qat_alg_aead_session_create_content_desc_auth(session,
656                                                 key_data,
657                                                 key_length,
658                                                 0,
659                                                 auth_xform->digest_length,
660                                                 auth_xform->op))
661                                 return -EINVAL;
662                 } else {
663                         session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
664                         session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
665                         /*
666                          * The authentication descriptor content must be
667                          * created first, then the cipher content
668                          */
669                         if (qat_alg_aead_session_create_content_desc_auth(session,
670                                         key_data,
671                                         key_length,
672                                         0,
673                                         auth_xform->digest_length,
674                                         auth_xform->op))
675                                 return -EINVAL;
676
677                         if (qat_alg_aead_session_create_content_desc_cipher(session,
678                                                 auth_xform->key.data,
679                                                 auth_xform->key.length))
680                                 return -EINVAL;
681                 }
682                 /* Restore to authentication only */
683                 session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
684         } else {
685                 if (qat_alg_aead_session_create_content_desc_auth(session,
686                                 key_data,
687                                 key_length,
688                                 0,
689                                 auth_xform->digest_length,
690                                 auth_xform->op))
691                         return -EINVAL;
692         }
693
694         session->digest_length = auth_xform->digest_length;
695         return 0;
696 }
697
698 int
699 qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
700                                 struct qat_session *session)
701 {
702         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
703         enum rte_crypto_auth_operation crypto_operation;
704
705         /*
706          * Store AEAD IV parameters as cipher IV,
707          * to avoid unnecessary memory usage
708          */
709         session->cipher_iv.offset = xform->aead.iv.offset;
710         session->cipher_iv.length = xform->aead.iv.length;
711
712         switch (aead_xform->algo) {
713         case RTE_CRYPTO_AEAD_AES_GCM:
714                 if (qat_alg_validate_aes_key(aead_xform->key.length,
715                                 &session->qat_cipher_alg) != 0) {
716                         PMD_DRV_LOG(ERR, "Invalid AES key size");
717                         return -EINVAL;
718                 }
719                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
720                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
721                 break;
722         case RTE_CRYPTO_AEAD_AES_CCM:
723                 if (qat_alg_validate_aes_key(aead_xform->key.length,
724                                 &session->qat_cipher_alg) != 0) {
725                         PMD_DRV_LOG(ERR, "Invalid AES key size");
726                         return -EINVAL;
727                 }
728                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
729                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
730                 break;
731         default:
732                 PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
733                                 aead_xform->algo);
734                 return -EINVAL;
735         }
736
737         if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
738                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
739                         (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
740                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
741                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
742                 /*
743                  * The cipher descriptor content must be created first,
744                  * then the authentication content
745                  */
746
747                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
748                         RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
749
750                 if (qat_alg_aead_session_create_content_desc_cipher(session,
751                                         aead_xform->key.data,
752                                         aead_xform->key.length))
753                         return -EINVAL;
754
755                 if (qat_alg_aead_session_create_content_desc_auth(session,
756                                         aead_xform->key.data,
757                                         aead_xform->key.length,
758                                         aead_xform->aad_length,
759                                         aead_xform->digest_length,
760                                         crypto_operation))
761                         return -EINVAL;
762         } else {
763                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
764                 /*
765                  * The authentication descriptor content must be created
766                  * first, then the cipher content
767                  */
768
769                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
770                         RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
771
772                 if (qat_alg_aead_session_create_content_desc_auth(session,
773                                         aead_xform->key.data,
774                                         aead_xform->key.length,
775                                         aead_xform->aad_length,
776                                         aead_xform->digest_length,
777                                         crypto_operation))
778                         return -EINVAL;
779
780                 if (qat_alg_aead_session_create_content_desc_cipher(session,
781                                         aead_xform->key.data,
782                                         aead_xform->key.length))
783                         return -EINVAL;
784         }
785
786         session->digest_length = aead_xform->digest_length;
787         return 0;
788 }
789
790 unsigned qat_crypto_sym_get_session_private_size(
791                 struct rte_cryptodev *dev __rte_unused)
792 {
793         return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
794 }
795
796 static inline uint32_t
797 qat_bpicipher_preprocess(struct qat_session *ctx,
798                                 struct rte_crypto_op *op)
799 {
800         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
801         struct rte_crypto_sym_op *sym_op = op->sym;
802         uint8_t last_block_len = block_len > 0 ?
803                         sym_op->cipher.data.length % block_len : 0;
804
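            /* DOCSIS BPI decryption: the device CBC-decrypts only the complete
             * blocks, so a trailing partial block is decrypted here in software
             * (CFB) before the request is enqueued, while the preceding
             * ciphertext block is still available to use as the IV.
             */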
805         if (last_block_len &&
806                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
807
808                 /* Decrypt last block */
809                 uint8_t *last_block, *dst, *iv;
810                 uint32_t last_block_offset = sym_op->cipher.data.offset +
811                                 sym_op->cipher.data.length - last_block_len;
812                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
813                                 uint8_t *, last_block_offset);
814
815                 if (unlikely(sym_op->m_dst != NULL))
816                         /* out-of-place operation (OOP) */
817                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
818                                                 uint8_t *, last_block_offset);
819                 else
820                         dst = last_block;
821
822                 if (last_block_len < sym_op->cipher.data.length)
823                         /* use previous block ciphertext as IV */
824                         iv = last_block - block_len;
825                 else
826                         /* runt block, i.e. less than one full block */
827                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
828                                         ctx->cipher_iv.offset);
829
830 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
831                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
832                         last_block_len);
833                 if (sym_op->m_dst != NULL)
834                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
835                                 last_block_len);
836 #endif
837                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
838                                 last_block_len, ctx->bpi_ctx);
839 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
840                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
841                         last_block_len);
842                 if (sym_op->m_dst != NULL)
843                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
844                                 last_block_len);
845 #endif
846         }
847
848         return sym_op->cipher.data.length - last_block_len;
849 }
850
851 static inline uint32_t
852 qat_bpicipher_postprocess(struct qat_session *ctx,
853                                 struct rte_crypto_op *op)
854 {
855         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
856         struct rte_crypto_sym_op *sym_op = op->sym;
857         uint8_t last_block_len = block_len > 0 ?
858                         sym_op->cipher.data.length % block_len : 0;
859
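            /* DOCSIS BPI encryption: the device has already CBC-encrypted the
             * complete blocks, so a trailing partial block is encrypted here in
             * software (CFB) after dequeue, using the last full ciphertext
             * block as the IV.
             */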
860         if (last_block_len > 0 &&
861                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
862
863                 /* Encrypt last block */
864                 uint8_t *last_block, *dst, *iv;
865                 uint32_t last_block_offset;
866
867                 last_block_offset = sym_op->cipher.data.offset +
868                                 sym_op->cipher.data.length - last_block_len;
869                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
870                                 uint8_t *, last_block_offset);
871
872                 if (unlikely(sym_op->m_dst != NULL))
873                         /* out-of-place operation (OOP) */
874                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
875                                                 uint8_t *, last_block_offset);
876                 else
877                         dst = last_block;
878
879                 if (last_block_len < sym_op->cipher.data.length)
880                         /* use previous block ciphertext as IV */
881                         iv = dst - block_len;
882                 else
883                         /* runt block, i.e. less than one full block */
884                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
885                                         ctx->cipher_iv.offset);
886
887 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
888                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
889                         last_block_len);
890                 if (sym_op->m_dst != NULL)
891                         rte_hexdump(stdout, "BPI: dst before post-process:",
892                                         dst, last_block_len);
893 #endif
894                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
895                                 last_block_len, ctx->bpi_ctx);
896 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
897                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
898                         last_block_len);
899                 if (sym_op->m_dst != NULL)
900                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
901                                 last_block_len);
902 #endif
903         }
904         return sym_op->cipher.data.length - last_block_len;
905 }
906
907 static inline void
908 txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
909         WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
910                         q->hw_queue_number, q->tail);
911         q->nb_pending_requests = 0;
912         q->csr_tail = q->tail;
913 }
914
915 uint16_t
916 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
917                 uint16_t nb_ops)
918 {
919         register struct qat_queue *queue;
920         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
921         register uint32_t nb_ops_sent = 0;
922         register struct rte_crypto_op **cur_op = ops;
923         register int ret;
924         uint16_t nb_ops_possible = nb_ops;
925         register uint8_t *base_addr;
926         register uint32_t tail;
927         int overflow;
928
929         if (unlikely(nb_ops == 0))
930                 return 0;
931
932         /* read params used a lot in main loop into registers */
933         queue = &(tmp_qp->tx_q);
934         base_addr = (uint8_t *)queue->base_addr;
935         tail = queue->tail;
936
937         /* Find how many can actually fit on the ring */
938         tmp_qp->inflights16 += nb_ops;
939         overflow = tmp_qp->inflights16 - queue->max_inflights;
940         if (overflow > 0) {
941                 tmp_qp->inflights16 -= overflow;
942                 nb_ops_possible = nb_ops - overflow;
943                 if (nb_ops_possible == 0)
944                         return 0;
945         }
946
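            /* Build one request descriptor per op directly into the TX ring at
             * the local tail; stop early if a descriptor cannot be built.
             */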
947         while (nb_ops_sent != nb_ops_possible) {
948                 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
949                         tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
950                 if (ret != 0) {
951                         tmp_qp->stats.enqueue_err_count++;
952                         /*
953                          * This message cannot be enqueued; remove the
954                          * remaining unsent ops from the in-flight count
955                          */
956                         tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
957                         if (nb_ops_sent == 0)
958                                 return 0;
959                         goto kick_tail;
960                 }
961
962                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
963                 nb_ops_sent++;
964                 cur_op++;
965         }
966 kick_tail:
967         queue->tail = tail;
968         tmp_qp->stats.enqueued_count += nb_ops_sent;
969         queue->nb_pending_requests += nb_ops_sent;
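            /* Coalesce doorbell writes: only write the tail CSR when few
             * requests are in flight (avoid stalling the device) or enough
             * requests have accumulated, to amortise the MMIO cost.
             */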
970         if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
971                         queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
972                 txq_write_tail(tmp_qp, queue);
973         }
974         return nb_ops_sent;
975 }
976
977 static inline
978 void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
979 {
980         uint32_t old_head, new_head;
981         uint32_t max_head;
982
983         old_head = q->csr_head;
984         new_head = q->head;
985         max_head = qp->nb_descriptors * q->msg_size;
986
987         /* write out free descriptors */
988         void *cur_desc = (uint8_t *)q->base_addr + old_head;
989
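            /* The response ring is circular: if the head has wrapped, clear
             * from the old head to the end of the ring, then from the start
             * of the ring up to the new head.
             */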
990         if (new_head < old_head) {
991                 memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
992                 memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
993         } else {
994                 memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
995         }
996         q->nb_processed_responses = 0;
997         q->csr_head = new_head;
998
999         /* write current head to CSR */
1000         WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
1001                             q->hw_queue_number, new_head);
1002 }
1003
1004 uint16_t
1005 qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
1006                 uint16_t nb_ops)
1007 {
1008         struct qat_queue *rx_queue, *tx_queue;
1009         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
1010         uint32_t msg_counter = 0;
1011         struct rte_crypto_op *rx_op;
1012         struct icp_qat_fw_comn_resp *resp_msg;
1013         uint32_t head;
1014
1015         rx_queue = &(tmp_qp->rx_q);
1016         tx_queue = &(tmp_qp->tx_q);
1017         head = rx_queue->head;
1018         resp_msg = (struct icp_qat_fw_comn_resp *)
1019                         ((uint8_t *)rx_queue->base_addr + head);
1020
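             /* Poll the response ring until an empty slot (marked with the
              * ADF_RING_EMPTY_SIG pattern) is reached or nb_ops responses
              * have been collected.
              */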
1021         while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
1022                         msg_counter != nb_ops) {
1023                 rx_op = (struct rte_crypto_op *)(uintptr_t)
1024                                 (resp_msg->opaque_data);
1025
1026 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
1027                 rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
1028                         sizeof(struct icp_qat_fw_comn_resp));
1029 #endif
1030                 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
1031                                 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
1032                                         resp_msg->comn_hdr.comn_status)) {
1033                         rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1034                 } else {
1035                         struct qat_session *sess = (struct qat_session *)
1036                                         get_session_private_data(
1037                                         rx_op->sym->session,
1038                                         cryptodev_qat_driver_id);
1039
1040                         if (sess->bpi_ctx)
1041                                 qat_bpicipher_postprocess(sess, rx_op);
1042                         rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1043                 }
1044
1045                 head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
1046                 resp_msg = (struct icp_qat_fw_comn_resp *)
1047                                 ((uint8_t *)rx_queue->base_addr + head);
1048                 *ops = rx_op;
1049                 ops++;
1050                 msg_counter++;
1051         }
1052         if (msg_counter > 0) {
1053                 rx_queue->head = head;
1054                 tmp_qp->stats.dequeued_count += msg_counter;
1055                 rx_queue->nb_processed_responses += msg_counter;
1056                 tmp_qp->inflights16 -= msg_counter;
1057
1058                 if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
1059                         rxq_free_desc(tmp_qp, rx_queue);
1060         }
1061         /* also check if tail needs to be advanced */
1062         if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
1063                         tx_queue->tail != tx_queue->csr_tail) {
1064                 txq_write_tail(tmp_qp, tx_queue);
1065         }
1066         return msg_counter;
1067 }
1068
1069 static inline int
1070 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
1071                 struct qat_alg_buf_list *list, uint32_t data_len)
1072 {
1073         int nr = 1;
1074
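             /* The first SGL entry starts at buff_start, which may lie part
              * way into the first mbuf segment, so its length is the
              * remainder of that segment.
              */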
1075         uint32_t buf_len = rte_pktmbuf_iova(buf) -
1076                         buff_start + rte_pktmbuf_data_len(buf);
1077
1078         list->bufers[0].addr = buff_start;
1079         list->bufers[0].resrvd = 0;
1080         list->bufers[0].len = buf_len;
1081
1082         if (data_len <= buf_len) {
1083                 list->num_bufs = nr;
1084                 list->bufers[0].len = data_len;
1085                 return 0;
1086         }
1087
1088         buf = buf->next;
1089         while (buf) {
1090                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
1091                         PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
1092                                         " entry(%u)",
1093                                         QAT_SGL_MAX_NUMBER);
1094                         return -EINVAL;
1095                 }
1096
1097                 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
1098                 list->bufers[nr].resrvd = 0;
1099                 list->bufers[nr].addr = rte_pktmbuf_iova(buf);
1100
1101                 buf_len += list->bufers[nr].len;
1102                 buf = buf->next;
1103
1104                 if (buf_len > data_len) {
1105                         list->bufers[nr].len -=
1106                                 buf_len - data_len;
1107                         buf = NULL;
1108                 }
1109                 ++nr;
1110         }
1111         list->num_bufs = nr;
1112
1113         return 0;
1114 }
1115
1116 static inline void
1117 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
1118                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1119                 struct rte_crypto_op *op,
1120                 struct icp_qat_fw_la_bulk_req *qat_req)
1121 {
1122         /* copy IV into request if it fits */
1123         if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
1124                 rte_memcpy(cipher_param->u.cipher_IV_array,
1125                                 rte_crypto_op_ctod_offset(op, uint8_t *,
1126                                         iv_offset),
1127                                 iv_length);
1128         } else {
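                     /* IV does not fit in the request's inline IV array: pass
                      * its physical address instead and switch the request to
                      * 64-bit IV pointer mode.
                      */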
1129                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
1130                                 qat_req->comn_hdr.serv_specif_flags,
1131                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
1132                 cipher_param->u.s.cipher_IV_ptr =
1133                                 rte_crypto_op_ctophys_offset(op,
1134                                         iv_offset);
1135         }
1136 }
1137
1138 /** Setting the IV for CCM is a special case: byte 0 is set to q-1,
1139  *  where q is the padding of the nonce within the 16-byte block
1140  */
1141 static inline void
1142 set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
1143                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1144                 struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
1145 {
1146         rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
1147                         ICP_QAT_HW_CCM_NONCE_OFFSET,
1148                         rte_crypto_op_ctod_offset(op, uint8_t *,
1149                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1150                         iv_length);
1151         *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
1152                         q - ICP_QAT_HW_CCM_NONCE_OFFSET;
1153
1154         if (aad_len_field_sz)
1155                 rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
1156                         rte_crypto_op_ctod_offset(op, uint8_t *,
1157                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1158                         iv_length);
1159 }
1160
1161 static inline int
1162 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
1163                 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
1164 {
1165         int ret = 0;
1166         struct qat_session *ctx;
1167         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1168         struct icp_qat_fw_la_auth_req_params *auth_param;
1169         register struct icp_qat_fw_la_bulk_req *qat_req;
1170         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
1171         uint32_t cipher_len = 0, cipher_ofs = 0;
1172         uint32_t auth_len = 0, auth_ofs = 0;
1173         uint32_t min_ofs = 0;
1174         uint64_t src_buf_start = 0, dst_buf_start = 0;
1175         uint8_t do_sgl = 0;
1176
1177 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1178         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
1179                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
1180                                 "operation requests, op (%p) is not a "
1181                                 "symmetric operation.", op);
1182                 return -EINVAL;
1183         }
1184 #endif
1185         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
1186                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
1187                                 " requests, op (%p) is sessionless.", op);
1188                 return -EINVAL;
1189         }
1190
1191         ctx = (struct qat_session *)get_session_private_data(
1192                         op->sym->session, cryptodev_qat_driver_id);
1193
1194         if (unlikely(ctx == NULL)) {
1195                 PMD_DRV_LOG(ERR, "Session was not created for this device");
1196                 return -EINVAL;
1197         }
1198
1199         if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
1200                 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
1201                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1202                 return -EINVAL;
1203         }
1204
1205
1206
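             /* Start from the 128-byte request template prebuilt at session
              * setup, then patch the per-operation fields below.
              */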
1207         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
1208         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
1209         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
1210         cipher_param = (void *)&qat_req->serv_specif_rqpars;
1211         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
1212
1213         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
1214                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1215                 /* AES-GCM or AES-CCM */
1216                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1217                                 ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
1218                                 (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
1219                                 && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
1220                                 && ctx->qat_hash_alg ==
1221                                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
1222                         do_aead = 1;
1223                 } else {
1224                         do_auth = 1;
1225                         do_cipher = 1;
1226                 }
1227         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1228                 do_auth = 1;
1229                 do_cipher = 0;
1230         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1231                 do_auth = 0;
1232                 do_cipher = 1;
1233         }
1234
1235         if (do_cipher) {
1236
1237                 if (ctx->qat_cipher_alg ==
1238                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
1239                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
1240                         ctx->qat_cipher_alg ==
1241                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1242
1243                         if (unlikely(
1244                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
1245                                  || (cipher_param->cipher_offset
1246                                                         % BYTE_LENGTH != 0))) {
1247                                 PMD_DRV_LOG(ERR,
1248                   "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
1249                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1250                                 return -EINVAL;
1251                         }
1252                         cipher_len = op->sym->cipher.data.length >> 3;
1253                         cipher_ofs = op->sym->cipher.data.offset >> 3;
1254
1255                 } else if (ctx->bpi_ctx) {
1256                         /* DOCSIS - only send complete blocks to the device;
1257                          * process any partial block using CFB mode. Even
1258                          * with 0 complete blocks, still send the request so
1259                          * it reaches the rx queue for post-processing and dequeuing
1260                          */
1261                         cipher_len = qat_bpicipher_preprocess(ctx, op);
1262                         cipher_ofs = op->sym->cipher.data.offset;
1263                 } else {
1264                         cipher_len = op->sym->cipher.data.length;
1265                         cipher_ofs = op->sym->cipher.data.offset;
1266                 }
1267
1268                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1269                                 cipher_param, op, qat_req);
1270                 min_ofs = cipher_ofs;
1271         }
1272
1273         if (do_auth) {
1274
1275                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
1276                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
1277                         ctx->qat_hash_alg ==
1278                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
1279                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
1280                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
1281                                 PMD_DRV_LOG(ERR,
1282                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1283                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1284                                 return -EINVAL;
1285                         }
1286                         auth_ofs = op->sym->auth.data.offset >> 3;
1287                         auth_len = op->sym->auth.data.length >> 3;
1288
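                             /* Descriptive note (not in the original source): for
                              * these algorithms the auth IV is passed to the device
                              * through the AAD pointer field
                              */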
1289                         auth_param->u1.aad_adr =
1290                                         rte_crypto_op_ctophys_offset(op,
1291                                                         ctx->auth_iv.offset);
1292
1293                 } else if (ctx->qat_hash_alg ==
1294                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1295                                 ctx->qat_hash_alg ==
1296                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1297                         /* AES-GMAC (authentication-only GCM) */
1298                         set_cipher_iv(ctx->auth_iv.length,
1299                                 ctx->auth_iv.offset,
1300                                 cipher_param, op, qat_req);
1301                         auth_ofs = op->sym->auth.data.offset;
1302                         auth_len = op->sym->auth.data.length;
1303
1304                         auth_param->u1.aad_adr = 0;
1305                         auth_param->u2.aad_sz = 0;
1306
1307                         /*
1308                          * If len(IV) == 12 bytes the firmware computes J0
1309                          */
1310                         if (ctx->auth_iv.length == 12) {
1311                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1312                                         qat_req->comn_hdr.serv_specif_flags,
1313                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1314
1315                         }
1316                 } else {
1317                         auth_ofs = op->sym->auth.data.offset;
1318                         auth_len = op->sym->auth.data.length;
1319
1320                 }
1321                 min_ofs = auth_ofs;
1322
1323                 if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
1324                         auth_param->auth_res_addr =
1325                                         op->sym->auth.digest.phys_addr;
1326
1327         }
1328
1329         if (do_aead) {
1330                 /*
1331                  * This address may be used as the AAD physical pointer;
1332                  * for CCM it can be redirected to the IV offset in the op
1333                  */
1334                 rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
1335                 if (ctx->qat_hash_alg ==
1336                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1337                                 ctx->qat_hash_alg ==
1338                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1339                         /*
1340                          * If len(IV) == 12 bytes the firmware computes J0
1341                          */
1342                         if (ctx->cipher_iv.length == 12) {
1343                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1344                                         qat_req->comn_hdr.serv_specif_flags,
1345                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1346                         }
1347
1348                         set_cipher_iv(ctx->cipher_iv.length,
1349                                         ctx->cipher_iv.offset,
1350                                         cipher_param, op, qat_req);
1351
1352                 } else if (ctx->qat_hash_alg ==
1353                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
1354
1355                         /* In the case of AES-CCM this may point to user-selected
1356                          * memory or to the IV offset in the crypto_op
1357                          */
1358                         uint8_t *aad_data = op->sym->aead.aad.data;
1359                         /* This is the true AAD length; it does not include the 18
1360                          * preceding bytes (B0 block + AAD length field)
1361                          */
1362                         uint8_t aad_ccm_real_len = 0;
1363
1364                         uint8_t aad_len_field_sz = 0;
1365                         uint32_t msg_len_be =
1366                                         rte_bswap32(op->sym->aead.data.length);
1367
1368                         if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
1369                                 aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
1370                                 aad_ccm_real_len = ctx->aad_len -
1371                                         ICP_QAT_HW_CCM_AAD_B0_LEN -
1372                                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
1373                         } else {
1374                                 /*
1375                                  * aad_len is not greater than 18, so there is no actual
1376                                  * AAD data; use the IV placed after the op for the B0 block
1377                                  */
1378                                 aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
1379                                                 ctx->cipher_iv.offset);
1380                                 aad_phys_addr_aead =
1381                                                 rte_crypto_op_ctophys_offset(op,
1382                                                                 ctx->cipher_iv.offset);
1383                         }
1384
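                             /* Descriptive note (not in the original source): this builds
                              * the CCM B0 block. q is the size in bytes of the message-length
                              * field (nonce length + q is a constant) and byte 0 carries the
                              * flags derived from the AAD length-field size, digest length
                              * and q. Example layout for a 12-byte nonce, assuming
                              * ICP_QAT_HW_CCM_NQ_CONST is 15 (so q = 3):
                              *   byte 0      : flags
                              *   bytes 1-12  : nonce
                              *   bytes 13-15 : message length (big-endian)
                              */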
1385                         uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
1386
1387                         aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
1388                                                         ctx->digest_length, q);
1389
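                             /* Descriptive note (not in the original source): write the
                              * message length, big-endian and right-aligned, into the
                              * q-byte length field at the end of B0
                              */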
1390                         if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
1391                                 memcpy(aad_data + ctx->cipher_iv.length +
1392                                         ICP_QAT_HW_CCM_NONCE_OFFSET
1393                                         + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
1394                                         (uint8_t *)&msg_len_be,
1395                                         ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
1396                         } else {
1397                                 memcpy(aad_data + ctx->cipher_iv.length +
1398                                         ICP_QAT_HW_CCM_NONCE_OFFSET,
1399                                         (uint8_t *)&msg_len_be
1400                                         + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
1401                                         - q), q);
1402                         }
1403
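                             /* Descriptive note (not in the original source): if real AAD
                              * is present, store its length (two bytes, big-endian)
                              * immediately after B0 and zero-pad the AAD so that it ends
                              * on a full block boundary
                              */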
1404                         if (aad_len_field_sz > 0) {
1405                                 *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
1406                                                 = rte_bswap16(aad_ccm_real_len);
1407
1408                                 if ((aad_ccm_real_len + aad_len_field_sz)
1409                                                 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
1410                                         uint8_t pad_len = 0;
1411                                         uint8_t pad_idx = 0;
1412
1413                                         pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
1414                                                 ((aad_ccm_real_len + aad_len_field_sz) %
1415                                                         ICP_QAT_HW_CCM_AAD_B0_LEN);
1416                                         pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
1417                                                 aad_ccm_real_len + aad_len_field_sz;
1418                                         memset(&aad_data[pad_idx],
1419                                                         0, pad_len);
1420                                 }
1421
1422                         }
1423
1424                         set_cipher_iv_ccm(ctx->cipher_iv.length,
1425                                         ctx->cipher_iv.offset,
1426                                         cipher_param, op, q,
1427                                         aad_len_field_sz);
1428
1429                 }
1430
1431                 cipher_len = op->sym->aead.data.length;
1432                 cipher_ofs = op->sym->aead.data.offset;
1433                 auth_len = op->sym->aead.data.length;
1434                 auth_ofs = op->sym->aead.data.offset;
1435
1436                 auth_param->u1.aad_adr = aad_phys_addr_aead;
1437                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
1438                 min_ofs = op->sym->aead.data.offset;
1439         }
1440
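             /* Descriptive note (not in the original source): a chained
              * (multi-segment) mbuf on either source or destination requires
              * the scatter-gather (SGL) request format
              */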
1441         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1442                 do_sgl = 1;
1443
1444         /* adjust for the cipher + auth chain case */
1445         if (do_cipher && do_auth)
1446                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1447
1448         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1449                 min_ofs = 0;
1450
1451         if (unlikely(op->sym->m_dst != NULL)) {
1452                 /* Out-of-place operation (OOP)
1453                  * Don't align the DMA start; DMA only the minimum data set
1454                  * so as not to overwrite data in the destination buffer
1455                  */
1456                 src_buf_start =
1457                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
1458                 dst_buf_start =
1459                         rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
1460
1461         } else {
1462                 /* In-place operation
1463                  * Start DMA at nearest aligned address below min_ofs
1464                  */
1465                 src_buf_start =
1466                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
1467                                                 & QAT_64_BTYE_ALIGN_MASK;
1468
1469                 if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
1470                                         rte_pktmbuf_headroom(op->sym->m_src))
1471                                                         > src_buf_start)) {
1472                         /* alignment has pushed the address before the start of
1473                          * the mbuf, so revert and take the performance hit
1474                          */
1475                         src_buf_start =
1476                                 rte_pktmbuf_iova_offset(op->sym->m_src,
1477                                                                 min_ofs);
1478                 }
1479                 dst_buf_start = src_buf_start;
1480         }
1481
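             /* Descriptive note (not in the original source): the offsets
              * programmed below are relative to the DMA start address chosen
              * above (src_buf_start), not to the start of the mbuf data
              */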
1482         if (do_cipher || do_aead) {
1483                 cipher_param->cipher_offset =
1484                                 (uint32_t)rte_pktmbuf_iova_offset(
1485                                 op->sym->m_src, cipher_ofs) - src_buf_start;
1486                 cipher_param->cipher_length = cipher_len;
1487         } else {
1488                 cipher_param->cipher_offset = 0;
1489                 cipher_param->cipher_length = 0;
1490         }
1491
1492         if (do_auth || do_aead) {
1493                 auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
1494                                 op->sym->m_src, auth_ofs) - src_buf_start;
1495                 auth_param->auth_len = auth_len;
1496         } else {
1497                 auth_param->auth_off = 0;
1498                 auth_param->auth_len = 0;
1499         }
1500
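             /* Descriptive note (not in the original source): overall DMA
              * length is whichever of the cipher or auth regions ends
              * furthest from the DMA start address
              */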
1501         qat_req->comn_mid.dst_length =
1502                 qat_req->comn_mid.src_length =
1503                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1504                 > (auth_param->auth_off + auth_param->auth_len) ?
1505                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1506                 : (auth_param->auth_off + auth_param->auth_len);
1507
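             /* Descriptive note (not in the original source): for SGL, flag
              * the request as using scatter-gather pointers and build the
              * source (and, for out-of-place ops, destination) SGL arrays in
              * the per-op cookie
              */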
1508         if (do_sgl) {
1509
1510                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1511                                 QAT_COMN_PTR_TYPE_SGL);
1512                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1513                                 &qat_op_cookie->qat_sgl_list_src,
1514                                 qat_req->comn_mid.src_length);
1515                 if (ret) {
1516                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1517                         return ret;
1518                 }
1519
1520                 if (likely(op->sym->m_dst == NULL))
1521                         qat_req->comn_mid.dest_data_addr =
1522                                 qat_req->comn_mid.src_data_addr =
1523                                 qat_op_cookie->qat_sgl_src_phys_addr;
1524                 else {
1525                         ret = qat_sgl_fill_array(op->sym->m_dst,
1526                                         dst_buf_start,
1527                                         &qat_op_cookie->qat_sgl_list_dst,
1528                                                 qat_req->comn_mid.dst_length);
1529
1530                         if (ret) {
1531                                 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
1532                                                 "fill sgl array");
1533                                 return ret;
1534                         }
1535
1536                         qat_req->comn_mid.src_data_addr =
1537                                 qat_op_cookie->qat_sgl_src_phys_addr;
1538                         qat_req->comn_mid.dest_data_addr =
1539                                         qat_op_cookie->qat_sgl_dst_phys_addr;
1540                 }
1541         } else {
1542                 qat_req->comn_mid.src_data_addr = src_buf_start;
1543                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1544         }
1545
1546 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1547         rte_hexdump(stdout, "qat_req:", qat_req,
1548                         sizeof(struct icp_qat_fw_la_bulk_req));
1549         rte_hexdump(stdout, "src_data:",
1550                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1551                         rte_pktmbuf_data_len(op->sym->m_src));
1552         if (do_cipher) {
1553                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
1554                                                 uint8_t *,
1555                                                 ctx->cipher_iv.offset);
1556                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
1557                                 ctx->cipher_iv.length);
1558         }
1559
1560         if (do_auth) {
1561                 if (ctx->auth_iv.length) {
1562                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
1563                                                         uint8_t *,
1564                                                         ctx->auth_iv.offset);
1565                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
1566                                                 ctx->auth_iv.length);
1567                 }
1568                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1569                                 ctx->digest_length);
1570         }
1571
1572         if (do_aead) {
1573                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
1574                                 ctx->digest_length);
1575                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
1576                                 ctx->aad_len);
1577         }
1578 #endif
1579         return 0;
1580 }
1581
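     /* Descriptive note (not in the original source): returns data modulo
      * 2^shift, computed with shifts rather than a divide,
      * e.g. adf_modulo(19, 4) == 3 (19 mod 16)
      */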
1582 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1583 {
1584         uint32_t div = data >> shift;
1585         uint32_t mult = div << shift;
1586
1587         return data - mult;
1588 }
1589
1590 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1591                 struct rte_cryptodev_stats *stats)
1592 {
1593         int i;
1594         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1595
1596         PMD_INIT_FUNC_TRACE();
1597         if (stats == NULL) {
1598                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1599                 return;
1600         }
1601         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1602                 if (qp[i] == NULL) {
1603                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1604                         continue;
1605                 }
1606
1607                 stats->enqueued_count += qp[i]->stats.enqueued_count;
1608                 stats->dequeued_count += qp[i]->stats.dequeued_count;
1609                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1610                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1611         }
1612 }
1613
1614 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1615 {
1616         int i;
1617         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1618
1619         PMD_INIT_FUNC_TRACE();
1620         for (i = 0; i < dev->data->nb_queue_pairs; i++)
1621                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
1622         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1623 }