crypto/qat: enable Rx head writes coalescing
[dpdk.git] drivers/crypto/qat/qat_crypto.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *       * Redistributions of source code must retain the above copyright
 *         notice, this list of conditions and the following disclaimer.
 *       * Redistributions in binary form must reproduce the above copyright
 *         notice, this list of conditions and the following disclaimer in
 *         the documentation and/or other materials provided with the
 *         distribution.
 *       * Neither the name of Intel Corporation nor the names of its
 *         contributors may be used to endorse or promote products derived
 *         from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>

#include "qat_logs.h"
#include "qat_algs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define BYTE_LENGTH    8

static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
		struct qat_pmd_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		if (capability->sym.cipher.algo == algo)
			return 1;
	}
	return 0;
}

static int
qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
		struct qat_pmd_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
			continue;

		if (capability->sym.auth.algo == algo)
			return 1;
	}
	return 0;
}

/** Encrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB encryption, same result, more performant
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;

	/* ECB method: encrypt the IV, then XOR this with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_encrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst+i) = *(src+i)^(encrypted_iv[i]);

	return 0;

cipher_encrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
	return -EINVAL;
}

/** Decrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB decryption, same result, more performant
 */
static inline int
bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;

	/* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_decrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst+i) = *(src+i)^(encrypted_iv[i]);

	return 0;

cipher_decrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
	return -EINVAL;
}
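
/*
 * Editorial note (not in the original source): for a single partial block
 * CFB mode degenerates to a one-block keystream, so both directions reduce
 * to the same operation:
 *
 *	ciphertext[i] = plaintext[i]  ^ E_K(IV)[i]	(encrypt)
 *	plaintext[i]  = ciphertext[i] ^ E_K(IV)[i]	(decrypt)
 *
 * This is why one ECB encryption of the IV plus a byte-wise XOR replaces a
 * full CFB implementation above, and why the decrypt helper also *encrypts*
 * its IV.
 */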

/** Creates a context in either AES or DES in ECB mode
 *  Depends on openssl libcrypto
 */
static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
		enum rte_crypto_cipher_operation direction __rte_unused,
		uint8_t *key, void **ctx)
{
	const EVP_CIPHER *algo = NULL;
	int ret;
	*ctx = EVP_CIPHER_CTX_new();

	if (*ctx == NULL) {
		ret = -ENOMEM;
		goto ctx_init_err;
	}

	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
		algo = EVP_des_ecb();
	else
		algo = EVP_aes_128_ecb();

	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, NULL) != 1) {
		ret = -EINVAL;
		goto ctx_init_err;
	}

	return 0;

ctx_init_err:
	if (*ctx != NULL)
		EVP_CIPHER_CTX_free(*ctx);
	return ret;
}

/** Frees a context previously created
 *  Depends on openssl libcrypto
 */
static void
bpi_cipher_ctx_free(void *bpi_ctx)
{
	if (bpi_ctx != NULL)
		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
}
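
/*
 * Illustrative usage sketch (editorial, not part of the driver): the BPI
 * helpers above are only ever used together, e.g. for a DES DOCSIS BPI
 * runt block.  "key", "iv", "src" and "dst" here are hypothetical buffers.
 *
 *	void *bpi = NULL;
 *	if (bpi_cipher_ctx_init(RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
 *			RTE_CRYPTO_CIPHER_OP_ENCRYPT, key, &bpi) == 0) {
 *		bpi_cipher_encrypt(src, dst, iv, 8, srclen, bpi);
 *		bpi_cipher_ctx_free(bpi);
 *	}
 */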

static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
		struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);

void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	struct qat_session *s = (struct qat_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	/* AEAD */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
		else
			return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	}

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}
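
/*
 * Example (editorial sketch): a cipher xform chained to an auth xform maps
 * to the CIPHER_HASH firmware command; the reverse chain maps to
 * HASH_CIPHER.  Fields other than the ones shown are omitted.
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH, .next = NULL };
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER, .next = &auth };
 *
 *	qat_get_cmd_id(&cipher);   returns ICP_QAT_FW_LA_CMD_CIPHER_HASH
 */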

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_session *session)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					&session->bpi_ctx);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					&session->bpi_ctx);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_alg_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}

int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}

int
qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_session *session = session_private;
	int ret;

	int qat_cmd_id;
	PMD_INIT_FUNC_TRACE();

	/* Set context descriptor physical address */
	session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
			offsetof(struct qat_session, cd);

	session->min_qat_dev_gen = QAT_GEN1;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_crypto_sym_configure_session_cipher(dev,
				xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_crypto_sym_configure_session_auth(dev,
				xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_crypto_sym_configure_session_aead(xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_crypto_sym_configure_session_aead(xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	}

	return 0;
}

int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct qat_pmd_private *internals = dev->data->dev_private;
	auth_xform = qat_get_auth_xform(xform);
	uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_alg_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * Create the cipher desc content first,
			 * then the authentication content
			 */
			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_alg_aead_session_create_content_desc_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * Create the authentication desc content first,
			 * then the cipher content
			 */
			if (qat_alg_aead_session_create_content_desc_auth(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}

int
qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
				struct qat_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_alg_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
				aead_xform->algo);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u",
				aead_xform->algo);
		return -EINVAL;
	}

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * Create the cipher desc content first,
		 * then the authentication content
		 */
		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					RTE_CRYPTO_AUTH_OP_GENERATE))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * Create the authentication desc content first,
		 * then the cipher content
		 */
		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					RTE_CRYPTO_AUTH_OP_VERIFY))
			return -EINVAL;

		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	session->digest_length = aead_xform->digest_length;
	return 0;
}

unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}

static inline uint32_t
qat_bpicipher_preprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {

		/* Decrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
						uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = last_block - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
				last_block_len);
#endif
		bpi_cipher_decrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
				last_block_len);
#endif
	}

	return sym_op->cipher.data.length - last_block_len;
}

static inline uint32_t
qat_bpicipher_postprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
						uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src before post-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before post-process:",
					dst, last_block_len);
#endif
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src after post-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after post-process:", dst,
				last_block_len);
#endif
	}
	return sym_op->cipher.data.length - last_block_len;
}
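
/*
 * Editorial note: for DOCSIS BPI the device is only given whole cipher
 * blocks.  A trailing partial block is handled in software with the CFB
 * helpers above: on decrypt it must happen *before* enqueue
 * (qat_bpicipher_preprocess), on encrypt *after* dequeue
 * (qat_bpicipher_postprocess), because CFB on the last runt block uses the
 * preceding ciphertext block as its IV and that block only exists on the
 * respective side of the hardware operation.
 */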

uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	tmp_qp->inflights16 += nb_ops;
	overflow = tmp_qp->inflights16 - queue->max_inflights;
	if (overflow > 0) {
		tmp_qp->inflights16 -= overflow;
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
			tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/*
			 * This message cannot be enqueued,
			 * decrease the count by the ops that were not sent
			 */
			tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, tail);
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	return nb_ops_sent;
}
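
/*
 * Editorial note: the enqueue path charges inflights16 for the whole burst
 * up front, then refunds the part that does not fit on the ring (overflow)
 * or fails descriptor build, so the counter matches what was actually
 * placed on the ring before the single tail CSR write at kick_tail.
 */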

static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	/* write current head to CSR */
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			    q->hw_queue_number, new_head);
}
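
/*
 * Editorial note: rxq_free_desc() is where the Rx head writes are
 * coalesced.  Instead of updating the ring-head CSR for every dequeued
 * response, processed responses accumulate until QAT_CSR_HEAD_WRITE_THRESH
 * is crossed (see qat_pmd_dequeue_op_burst below); only then is the span
 * [csr_head, head) marked empty and a single CSR write issued, with ring
 * wrap-around handled by the two memset() calls above.
 */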

uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;
	uint32_t head;

	queue = &(tmp_qp->rx_q);
	head = queue->head;
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comn_resp));
#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			struct qat_session *sess = (struct qat_session *)
					get_session_private_data(
					rx_op->sym->session,
					cryptodev_qat_driver_id);

			if (sess->bpi_ctx)
				qat_bpicipher_postprocess(sess, rx_op);
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		head = adf_modulo(head + queue->msg_size, queue->modulo);
		resp_msg = (struct icp_qat_fw_comn_resp *)
				((uint8_t *)queue->base_addr + head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		queue->head = head;
		tmp_qp->stats.dequeued_count += msg_counter;
		queue->nb_processed_responses += msg_counter;
		tmp_qp->inflights16 -= msg_counter;

		if (queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
			rxq_free_desc(tmp_qp, queue);
	}
	return msg_counter;
}

static inline int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
		struct qat_alg_buf_list *list, uint32_t data_len)
{
	int nr = 1;

	uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
			buff_start + rte_pktmbuf_data_len(buf);

	list->bufers[0].addr = buff_start;
	list->bufers[0].resrvd = 0;
	list->bufers[0].len = buf_len;

	if (data_len <= buf_len) {
		list->num_bufs = nr;
		list->bufers[0].len = data_len;
		return 0;
	}

	buf = buf->next;
	while (buf) {
		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
			PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
					" entry(%u)",
					QAT_SGL_MAX_NUMBER);
			return -EINVAL;
		}

		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
		list->bufers[nr].resrvd = 0;
		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);

		buf_len += list->bufers[nr].len;
		buf = buf->next;

		if (buf_len > data_len) {
			list->bufers[nr].len -=
				buf_len - data_len;
			buf = NULL;
		}
		++nr;
	}
	list->num_bufs = nr;

	return 0;
}
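
/*
 * Worked example (editorial): for a 3-segment mbuf chain of 100 bytes per
 * segment, with data_len == 220 and buff_start at the first segment's base:
 *	bufers[0].len = 100
 *	bufers[1].len = 100
 *	bufers[2].len = 20	(trimmed by buf_len - data_len == 80)
 * leaving num_bufs == 3.
 */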

static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
		struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_op *op,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				rte_crypto_op_ctod_offset(op, uint8_t *,
					iv_offset),
				iv_length);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr =
				rte_crypto_op_ctophys_offset(op,
					iv_offset);
	}
}
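
/*
 * Editorial note: an IV short enough to fit the request's inline
 * cipher_IV_array is copied directly into the descriptor; anything larger
 * is instead passed by physical address, with the 64-bit-pointer IV flag
 * set in the request header so firmware fetches it by DMA.
 */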
1138
1139 static inline int
1140 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
1141                 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
1142 {
1143         int ret = 0;
1144         struct qat_session *ctx;
1145         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1146         struct icp_qat_fw_la_auth_req_params *auth_param;
1147         register struct icp_qat_fw_la_bulk_req *qat_req;
1148         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
1149         uint32_t cipher_len = 0, cipher_ofs = 0;
1150         uint32_t auth_len = 0, auth_ofs = 0;
1151         uint32_t min_ofs = 0;
1152         uint64_t src_buf_start = 0, dst_buf_start = 0;
1153         uint8_t do_sgl = 0;
1154
1155 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1156         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
1157                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
1158                                 "operation requests, op (%p) is not a "
1159                                 "symmetric operation.", op);
1160                 return -EINVAL;
1161         }
1162 #endif
1163         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
1164                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
1165                                 " requests, op (%p) is sessionless.", op);
1166                 return -EINVAL;
1167         }
1168
1169         ctx = (struct qat_session *)get_session_private_data(
1170                         op->sym->session, cryptodev_qat_driver_id);
1171
1172         if (unlikely(ctx == NULL)) {
1173                 PMD_DRV_LOG(ERR, "Session was not created for this device");
1174                 return -EINVAL;
1175         }
1176
1177         if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
1178                 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
1179                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1180                 return -EINVAL;
1181         }
1182
1183         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
1184         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
1185         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
1186         cipher_param = (void *)&qat_req->serv_specif_rqpars;
1187         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
1188
1189         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
1190                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1191                 /* AES-GCM */
1192                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1193                                 ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1194                         do_aead = 1;
1195                 } else {
1196                         do_auth = 1;
1197                         do_cipher = 1;
1198                 }
1199         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1200                 do_auth = 1;
1201                 do_cipher = 0;
1202         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1203                 do_auth = 0;
1204                 do_cipher = 1;
1205         }
1206
1207         if (do_cipher) {
1208
1209                 if (ctx->qat_cipher_alg ==
1210                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
1211                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
1212                         ctx->qat_cipher_alg ==
1213                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1214
1215                         if (unlikely(
1216                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
1217                                  || (cipher_param->cipher_offset
1218                                                         % BYTE_LENGTH != 0))) {
1219                                 PMD_DRV_LOG(ERR,
1220                   "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
1221                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1222                                 return -EINVAL;
1223                         }
1224                         cipher_len = op->sym->cipher.data.length >> 3;
1225                         cipher_ofs = op->sym->cipher.data.offset >> 3;
1226
1227                 } else if (ctx->bpi_ctx) {
1228                         /* DOCSIS - only send complete blocks to device
1229                          * Process any partial block using CFB mode.
1230                          * Even if 0 complete blocks, still send this to device
1231                          * to get into rx queue for post-process and dequeuing
1232                          */
1233                         cipher_len = qat_bpicipher_preprocess(ctx, op);
1234                         cipher_ofs = op->sym->cipher.data.offset;
1235                 } else {
1236                         cipher_len = op->sym->cipher.data.length;
1237                         cipher_ofs = op->sym->cipher.data.offset;
1238                 }
1239
1240                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1241                                 cipher_param, op, qat_req);
1242                 min_ofs = cipher_ofs;
1243         }
1244
1245         if (do_auth) {
1246
1247                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
1248                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
1249                         ctx->qat_hash_alg ==
1250                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
1251                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
1252                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
1253                                 PMD_DRV_LOG(ERR,
1254                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1255                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1256                                 return -EINVAL;
1257                         }
1258                         auth_ofs = op->sym->auth.data.offset >> 3;
1259                         auth_len = op->sym->auth.data.length >> 3;
1260
1261                         auth_param->u1.aad_adr =
1262                                         rte_crypto_op_ctophys_offset(op,
1263                                                         ctx->auth_iv.offset);
1264
1265                 } else if (ctx->qat_hash_alg ==
1266                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1267                                 ctx->qat_hash_alg ==
1268                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1269                         /* AES-GMAC */
1270                         set_cipher_iv(ctx->auth_iv.length,
1271                                 ctx->auth_iv.offset,
1272                                 cipher_param, op, qat_req);
1273                         auth_ofs = op->sym->auth.data.offset;
1274                         auth_len = op->sym->auth.data.length;
1275
1276                         auth_param->u1.aad_adr = 0;
1277                         auth_param->u2.aad_sz = 0;
1278
                        /*
                         * If the IV is 12 bytes long, the firmware
                         * computes J0 itself
                         */
                        if (ctx->auth_iv.length == 12) {
                                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                                        qat_req->comn_hdr.serv_specif_flags,
                                        ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                        }
                } else {
                        auth_ofs = op->sym->auth.data.offset;
                        auth_len = op->sym->auth.data.length;
                }
1293                 min_ofs = auth_ofs;
1294
1295                 auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
1296
1297         }
1298
1299         if (do_aead) {
1300                 if (ctx->qat_hash_alg ==
1301                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1302                                 ctx->qat_hash_alg ==
1303                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
                        /*
                         * If the IV is 12 bytes long, the firmware
                         * computes J0 itself
                         */
1307                         if (ctx->cipher_iv.length == 12) {
1308                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1309                                         qat_req->comn_hdr.serv_specif_flags,
1310                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1311                         }
1312
1313                 }
1314
1315                 cipher_len = op->sym->aead.data.length;
1316                 cipher_ofs = op->sym->aead.data.offset;
1317                 auth_len = op->sym->aead.data.length;
1318                 auth_ofs = op->sym->aead.data.offset;
1319
1320                 auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
1321                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
1322                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1323                                 cipher_param, op, qat_req);
1324                 min_ofs = op->sym->aead.data.offset;
1325         }
1326
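        /* A segmented mbuf (non-NULL next pointer) on either source or
         * destination is not physically contiguous, so the request must
         * describe the data with a scatter-gather list.
         */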
1327         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1328                 do_sgl = 1;
1329
1330         /* adjust for chain case */
1331         if (do_cipher && do_auth)
1332                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
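        /* e.g. for cipher-then-hash with auth_ofs = 0 and cipher_ofs = 16,
         * min_ofs becomes 0 so the DMA window covers both regions.
         */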
1333
1334         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1335                 min_ofs = 0;
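        /* An offset past the first segment cannot be used as a DMA start
         * address, so anchor the transfer at the start of the chain; with
         * an SGL, the cipher/auth offsets computed below are then simply
         * offsets into the gather list.
         */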
1336
1337         if (unlikely(op->sym->m_dst != NULL)) {
1338                 /* Out-of-place operation (OOP)
1339                  * Don't align DMA start. DMA the minimum data-set
1340                  * so as not to overwrite data in dest buffer
1341                  */
1342                 src_buf_start =
1343                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
1344                 dst_buf_start =
1345                         rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
1346
1347         } else {
1348                 /* In-place operation
1349                  * Start DMA at nearest aligned address below min_ofs
1350                  */
1351                 src_buf_start =
1352                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
1353                                                 & QAT_64_BTYE_ALIGN_MASK;
1354
1355                 if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
1356                                         rte_pktmbuf_headroom(op->sym->m_src))
1357                                                         > src_buf_start)) {
                        /* aligning down moved the address before the start
                         * of the mbuf buffer, so revert to the unaligned
                         * address and take the performance hit
                         */
1361                         src_buf_start =
1362                                 rte_pktmbuf_mtophys_offset(op->sym->m_src,
1363                                                                 min_ofs);
1364                 }
1365                 dst_buf_start = src_buf_start;
1366         }
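        /* Assuming QAT_64_BTYE_ALIGN_MASK clears the low 6 bits, an
         * in-place start address of e.g. 0x...1074 is pulled back to
         * 0x...1040; the cipher/auth offsets computed below absorb the
         * difference.
         */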
1367
1368         if (do_cipher || do_aead) {
1369                 cipher_param->cipher_offset =
1370                                 (uint32_t)rte_pktmbuf_mtophys_offset(
1371                                 op->sym->m_src, cipher_ofs) - src_buf_start;
1372                 cipher_param->cipher_length = cipher_len;
1373         } else {
1374                 cipher_param->cipher_offset = 0;
1375                 cipher_param->cipher_length = 0;
1376         }
1377
1378         if (do_auth || do_aead) {
1379                 auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
1380                                 op->sym->m_src, auth_ofs) - src_buf_start;
1381                 auth_param->auth_len = auth_len;
1382         } else {
1383                 auth_param->auth_off = 0;
1384                 auth_param->auth_len = 0;
1385         }
1386
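        /* The DMA length must cover whichever of the cipher or auth
         * regions ends last, measured from the (possibly aligned-down)
         * DMA start address.
         */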
        qat_req->comn_mid.dst_length =
                qat_req->comn_mid.src_length =
                RTE_MAX(cipher_param->cipher_offset +
                                cipher_param->cipher_length,
                        auth_param->auth_off + auth_param->auth_len);
1393
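        /* For segmented buffers, build gather lists in the per-op cookie
         * and hand the firmware their physical addresses; otherwise the
         * flat physical addresses are used directly.
         */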
        if (do_sgl) {
1396                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1397                                 QAT_COMN_PTR_TYPE_SGL);
1398                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1399                                 &qat_op_cookie->qat_sgl_list_src,
1400                                 qat_req->comn_mid.src_length);
1401                 if (ret) {
1402                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1403                         return ret;
1404                 }
1405
1406                 if (likely(op->sym->m_dst == NULL))
1407                         qat_req->comn_mid.dest_data_addr =
1408                                 qat_req->comn_mid.src_data_addr =
1409                                 qat_op_cookie->qat_sgl_src_phys_addr;
1410                 else {
1411                         ret = qat_sgl_fill_array(op->sym->m_dst,
1412                                         dst_buf_start,
1413                                         &qat_op_cookie->qat_sgl_list_dst,
1414                                                 qat_req->comn_mid.dst_length);
1415
1416                         if (ret) {
                                PMD_DRV_LOG(ERR,
                                        "QAT PMD Cannot fill sgl array");
1419                                 return ret;
1420                         }
1421
1422                         qat_req->comn_mid.src_data_addr =
1423                                 qat_op_cookie->qat_sgl_src_phys_addr;
1424                         qat_req->comn_mid.dest_data_addr =
1425                                         qat_op_cookie->qat_sgl_dst_phys_addr;
1426                 }
1427         } else {
1428                 qat_req->comn_mid.src_data_addr = src_buf_start;
1429                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1430         }
1431
1432 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1433         rte_hexdump(stdout, "qat_req:", qat_req,
1434                         sizeof(struct icp_qat_fw_la_bulk_req));
1435         rte_hexdump(stdout, "src_data:",
1436                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1437                         rte_pktmbuf_data_len(op->sym->m_src));
1438         if (do_cipher) {
1439                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
1440                                                 uint8_t *,
1441                                                 ctx->cipher_iv.offset);
1442                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
1443                                 ctx->cipher_iv.length);
1444         }
1445
1446         if (do_auth) {
1447                 if (ctx->auth_iv.length) {
1448                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
1449                                                         uint8_t *,
1450                                                         ctx->auth_iv.offset);
1451                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
1452                                                 ctx->auth_iv.length);
1453                 }
1454                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1455                                 ctx->digest_length);
1456         }
1457
1458         if (do_aead) {
1459                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
1460                                 ctx->digest_length);
1461                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
1462                                 ctx->aad_len);
1463         }
1464 #endif
1465         return 0;
1466 }
1467
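/* Returns data % (1 << shift) without a division, i.e. the remainder of
 * data by a power-of-two ring size; e.g. adf_modulo(19, 4) == 19 - 16 == 3.
 */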
1468 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1469 {
1470         uint32_t div = data >> shift;
1471         uint32_t mult = div << shift;
1472
1473         return data - mult;
1474 }
1475
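/* The generic configure/start/stop callbacks are effectively no-ops for
 * QAT: resources are managed per queue pair (see qat_dev_close() below),
 * so these only emit an init trace.
 */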
1476 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1477                 __rte_unused struct rte_cryptodev_config *config)
1478 {
1479         PMD_INIT_FUNC_TRACE();
1480         return 0;
1481 }
1482
1483 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1484 {
1485         PMD_INIT_FUNC_TRACE();
1486         return 0;
1487 }
1488
1489 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1490 {
1491         PMD_INIT_FUNC_TRACE();
1492 }
1493
1494 int qat_dev_close(struct rte_cryptodev *dev)
1495 {
1496         int i, ret;
1497
1498         PMD_INIT_FUNC_TRACE();
1499
1500         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1501                 ret = qat_crypto_sym_qp_release(dev, i);
1502                 if (ret < 0)
1503                         return ret;
1504         }
1505
1506         return 0;
1507 }
1508
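/* Report device info; the queue-pair limit is the number of symmetric
 * qps per bundle times the number of bundles on the device.
 */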
1509 void qat_dev_info_get(struct rte_cryptodev *dev,
1510                         struct rte_cryptodev_info *info)
1511 {
1512         struct qat_pmd_private *internals = dev->data->dev_private;
1513
1514         PMD_INIT_FUNC_TRACE();
1515         if (info != NULL) {
1516                 info->max_nb_queue_pairs =
1517                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
1518                                 ADF_NUM_BUNDLES_PER_DEV;
1519                 info->feature_flags = dev->feature_flags;
1520                 info->capabilities = internals->qat_dev_capabilities;
1521                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1522                 info->driver_id = cryptodev_qat_driver_id;
1523                 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1524         }
1525 }
1526
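/* Aggregate per-queue-pair counters into the device-level stats,
 * skipping released/uninitialised queue pairs.
 */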
1527 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1528                 struct rte_cryptodev_stats *stats)
1529 {
1530         int i;
1531         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1532
1533         PMD_INIT_FUNC_TRACE();
1534         if (stats == NULL) {
                PMD_DRV_LOG(ERR, "Invalid stats pointer (NULL)");
1536                 return;
1537         }
1538         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1539                 if (qp[i] == NULL) {
1540                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1541                         continue;
1542                 }
1543
1544                 stats->enqueued_count += qp[i]->stats.enqueued_count;
1545                 stats->dequeued_count += qp[i]->stats.dequeued_count;
1546                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1547                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1548         }
1549 }
1550
1551 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1552 {
1553         int i;
1554         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1555
1556         PMD_INIT_FUNC_TRACE();
        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                /* skip released/uninitialised qps, as stats_get does */
                if (qp[i] == NULL) {
                        PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
                        continue;
                }
                memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
        }
1559         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1560 }