crypto/qat: remove dependency on ether library
drivers/crypto/qat/qat_crypto.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *       * Redistributions of source code must retain the above copyright
 *         notice, this list of conditions and the following disclaimer.
 *       * Redistributions in binary form must reproduce the above copyright
 *         notice, this list of conditions and the following disclaimer in
 *         the documentation and/or other materials provided with the
 *         distribution.
 *       * Neither the name of Intel Corporation nor the names of its
 *         contributors may be used to endorse or promote products derived
 *         from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>

#include "qat_logs.h"
#include "qat_algs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define BYTE_LENGTH    8

static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
		struct qat_pmd_private *internals) {
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		if (capability->sym.cipher.algo == algo)
			return 1;
	}
	return 0;
}

static int
qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
		struct qat_pmd_private *internals) {
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
			continue;

		if (capability->sym.auth.algo == algo)
			return 1;
	}
	return 0;
}

/** Encrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB encryption, same result, more performant
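 *  (for a single partial block, CFB is just C = P XOR E_k(IV); only the
 *  forward cipher is ever applied to the IV, so one ECB encrypt context
 *  serves both encrypt and decrypt)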
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;

	/* ECB method: encrypt the IV, then XOR this with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_encrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst+i) = *(src+i)^(encrypted_iv[i]);

	return 0;

cipher_encrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
	return -EINVAL;
}

/** Decrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB decryption, same result, more performant
 */
static inline int
bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;
	/* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_decrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst+i) = *(src+i)^(encrypted_iv[i]);

	return 0;

cipher_decrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
	return -EINVAL;
}

/** Creates a context in either AES or DES in ECB mode
 *  Depends on openssl libcrypto
 */
static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
		enum rte_crypto_cipher_operation direction __rte_unused,
		uint8_t *key, void **ctx)
{
	const EVP_CIPHER *algo = NULL;
	int ret;
	*ctx = EVP_CIPHER_CTX_new();

	if (*ctx == NULL) {
		ret = -ENOMEM;
		goto ctx_init_err;
	}

	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
		algo = EVP_des_ecb();
	else
		algo = EVP_aes_128_ecb();

	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
		ret = -EINVAL;
		goto ctx_init_err;
	}

	return 0;

ctx_init_err:
	if (*ctx != NULL)
		EVP_CIPHER_CTX_free(*ctx);
	return ret;
}

/** Frees a context previously created
 *  Depends on openssl libcrypto
 */
static void
bpi_cipher_ctx_free(void *bpi_ctx)
{
	if (bpi_ctx != NULL)
		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
}
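
/* Sketch of how these BPI helpers fit together (illustrative only; key,
 * iv, src, dst and partial_len are assumed names, not code from this
 * driver; partial_len must be smaller than the cipher block size):
 *
 *	void *bpi_ctx;
 *	if (bpi_cipher_ctx_init(RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
 *			RTE_CRYPTO_CIPHER_OP_ENCRYPT, key, &bpi_ctx) == 0) {
 *		bpi_cipher_encrypt(src, dst, iv, 16, partial_len, bpi_ctx);
 *		bpi_cipher_ctx_free(bpi_ctx);
 *	}
 */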

static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
		struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);

void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	struct qat_session *s = (struct qat_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	/* AEAD */
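	/* GCM authenticates the ciphertext, so encryption runs the cipher
	 * first (CIPHER_HASH) while decryption must hash the incoming
	 * ciphertext before deciphering it (HASH_CIPHER).
	 */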
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
		else
			return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	}

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_session *session)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					&session->bpi_ctx);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					&session->bpi_ctx);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_alg_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}

int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}

int
qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_session *session = session_private;
	int ret;

	int qat_cmd_id;
	PMD_INIT_FUNC_TRACE();

	/* Set context descriptor physical address */
	session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
			offsetof(struct qat_session, cd);
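	/* (the content descriptor lives inside the session object itself,
	 * so its physical address can be computed once here and reused in
	 * every request built from this session)
	 */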

	session->min_qat_dev_gen = QAT_GEN1;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_crypto_sym_configure_session_aead(xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_crypto_sym_configure_session_aead(xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	}

	return 0;
}

int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct qat_pmd_private *internals = dev->data->dev_private;
	auth_xform = qat_get_auth_xform(xform);
	uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_alg_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;

		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;

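	/* AES-GMAC is built on the GCM engine: the generate path is set up
	 * cipher-then-hash and the verify path hash-then-cipher; once both
	 * content descriptors exist the command is restored to auth-only.
	 */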
	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * Cipher descriptor content must be created first,
			 * then authentication
			 */
			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_alg_aead_session_create_content_desc_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * Authentication descriptor content must be created
			 * first, then cipher
			 */
			if (qat_alg_aead_session_create_content_desc_auth(session,
					key_data,
					key_length,
					0,
					auth_xform->digest_length,
					auth_xform->op))
				return -EINVAL;

			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}

int
qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
				struct qat_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_alg_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
				aead_xform->algo);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u",
				aead_xform->algo);
		return -EINVAL;
	}

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * Cipher descriptor content must be created first,
		 * then authentication
		 */
		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					RTE_CRYPTO_AUTH_OP_GENERATE))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * Authentication descriptor content must be created first,
		 * then cipher
		 */
		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					RTE_CRYPTO_AUTH_OP_VERIFY))
			return -EINVAL;

		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	session->digest_length = aead_xform->digest_length;
	return 0;
}

unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}

static inline uint32_t
qat_bpicipher_preprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

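	/* DOCSIS BPI: the hardware ciphers the full CBC blocks; a trailing
	 * partial block is CFB-processed in software here. For decryption
	 * the runt must be handled before enqueue, while the preceding
	 * ciphertext block (its IV) is still intact in the buffer.
	 */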
	if (last_block_len &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {

		/* Decrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
						uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = last_block - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
				last_block_len);
#endif
		bpi_cipher_decrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
				last_block_len);
#endif
	}

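	/* return only the length of the complete blocks: this is what the
	 * hardware will be asked to cipher
	 */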
	return sym_op->cipher.data.length - last_block_len;
}

static inline uint32_t
qat_bpicipher_postprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
						uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src before post-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before post-process:",
					dst, last_block_len);
#endif
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src after post-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after post-process:", dst,
				last_block_len);
#endif
	}
	return sym_op->cipher.data.length - last_block_len;
}

uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
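	/* (one atomic add optimistically reserves nb_ops in-flight slots;
	 * any surplus beyond max_inflights is handed straight back and the
	 * burst shrinks to what was actually reserved)
	 */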
	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
				- queue->max_inflights;
	if (overflow > 0) {
		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
			tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/*
			 * This message cannot be enqueued; give back the
			 * in-flight slots reserved for the ops not sent
			 */
			rte_atomic16_sub(&tmp_qp->inflights16,
					nb_ops_possible - nb_ops_sent);
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, tail);
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	return nb_ops_sent;
}

uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
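		/* the op pointer stored in the request's opaque field at
		 * enqueue time comes back untouched in the response
		 */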
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comn_resp));

#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			struct qat_session *sess = (struct qat_session *)
					get_session_private_data(
					rx_op->sym->session,
					cryptodev_qat_driver_id);

			if (sess->bpi_ctx)
				qat_bpicipher_postprocess(sess, rx_op);
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
					((uint8_t *)queue->base_addr +
							queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}

static inline int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
		struct qat_alg_buf_list *list, uint32_t data_len)
{
	int nr = 1;

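	/* first entry runs from the (possibly aligned-down) DMA start to the
	 * end of the first segment; further mbuf segments are appended until
	 * data_len bytes are covered, trimming the final entry to fit
	 */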
	uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
			buff_start + rte_pktmbuf_data_len(buf);

	list->bufers[0].addr = buff_start;
	list->bufers[0].resrvd = 0;
	list->bufers[0].len = buf_len;

	if (data_len <= buf_len) {
		list->num_bufs = nr;
		list->bufers[0].len = data_len;
		return 0;
	}

	buf = buf->next;
	while (buf) {
		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
			PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
					" entry(%u)",
					QAT_SGL_MAX_NUMBER);
			return -EINVAL;
		}

		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
		list->bufers[nr].resrvd = 0;
		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);

		buf_len += list->bufers[nr].len;
		buf = buf->next;

		if (buf_len > data_len) {
			list->bufers[nr].len -=
				buf_len - data_len;
			buf = NULL;
		}
		++nr;
	}
	list->num_bufs = nr;

	return 0;
}

static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
		struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_op *op,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				rte_crypto_op_ctod_offset(op, uint8_t *,
					iv_offset),
				iv_length);
	} else {
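		/* IV too large to embed in the descriptor: switch the
		 * request to 64-bit IV pointer mode and pass the IV by
		 * physical address instead
		 */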
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr =
				rte_crypto_op_ctophys_offset(op,
					iv_offset);
	}
}

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
		struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
{
	int ret = 0;
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;
	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	uint32_t min_ofs = 0;
	uint64_t src_buf_start = 0, dst_buf_start = 0;
	uint8_t do_sgl = 0;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	ctx = (struct qat_session *)get_session_private_data(
			op->sym->session, cryptodev_qat_driver_id);

	if (unlikely(ctx == NULL)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
		PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* AES-GCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			do_aead = 1;
		} else {
			do_auth = 1;
			do_cipher = 1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		do_auth = 1;
		do_cipher = 0;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		do_auth = 0;
		do_cipher = 1;
	}

	if (do_cipher) {

		if (ctx->qat_cipher_alg ==
					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {

			if (unlikely(
				(cipher_param->cipher_length % BYTE_LENGTH != 0)
				 || (cipher_param->cipher_offset
							% BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
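			/* these algorithms express offset/length in bits;
			 * convert to bytes for the request descriptor
			 */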
			cipher_len = op->sym->cipher.data.length >> 3;
			cipher_ofs = op->sym->cipher.data.offset >> 3;

		} else if (ctx->bpi_ctx) {
			/* DOCSIS - only send complete blocks to device
			 * Process any partial block using CFB mode.
			 * Even if 0 complete blocks, still send this to device
			 * to get into rx queue for post-process and dequeuing
			 */
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}

		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
				cipher_param, op, qat_req);
		min_ofs = cipher_ofs;
	}

	if (do_auth) {

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
			ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
			if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
				|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			auth_ofs = op->sym->auth.data.offset >> 3;
			auth_len = op->sym->auth.data.length >> 3;

			auth_param->u1.aad_adr =
					rte_crypto_op_ctophys_offset(op,
							ctx->auth_iv.offset);

		} else if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			/* AES-GMAC */
			set_cipher_iv(ctx->auth_iv.length,
				ctx->auth_iv.offset,
				cipher_param, op, qat_req);
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;

			auth_param->u1.aad_adr = 0;
			auth_param->u2.aad_sz = 0;

			/*
			 * If len(iv)==12B fw computes J0
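			 * (J0 = IV || 0^31 || 1 per the GCM spec; for any
			 * other IV length J0 must be derived via GHASH)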
1256                          */
1257                         if (ctx->auth_iv.length == 12) {
1258                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1259                                         qat_req->comn_hdr.serv_specif_flags,
1260                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1261
1262                         }
1263                 } else {
1264                         auth_ofs = op->sym->auth.data.offset;
1265                         auth_len = op->sym->auth.data.length;
1266
1267                 }
1268                 min_ofs = auth_ofs;
1269
1270                 auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
1271
1272         }
1273
1274         if (do_aead) {
1275                 if (ctx->qat_hash_alg ==
1276                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1277                                 ctx->qat_hash_alg ==
1278                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1279                         /*
1280                          * If len(IV) == 12 bytes the firmware computes J0
1281                          */
1282                         if (ctx->cipher_iv.length == 12) {
1283                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1284                                         qat_req->comn_hdr.serv_specif_flags,
1285                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1286                         }
1287
1288                 }
1289
1290                 cipher_len = op->sym->aead.data.length;
1291                 cipher_ofs = op->sym->aead.data.offset;
1292                 auth_len = op->sym->aead.data.length;
1293                 auth_ofs = op->sym->aead.data.offset;
1294
1295                 auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
1296                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
1297                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1298                                 cipher_param, op, qat_req);
1299                 min_ofs = op->sym->aead.data.offset;
1300         }
1301
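             /* chained source or destination mbufs require an SGL descriptor */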
1302         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1303                 do_sgl = 1;
1304
1305         /* for a cipher+auth chain, DMA from the lower of the two offsets */
1306         if (do_cipher && do_auth)
1307                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1308
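             /* the offset lies beyond the first mbuf segment, so DMA from
              * the start of the chain and let the SGL describe the data
              */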
1309         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1310                 min_ofs = 0;
1311
1312         if (unlikely(op->sym->m_dst != NULL)) {
1313                 /* Out-of-place operation (OOP)
1314                  * Don't align DMA start. DMA the minimum data-set
1315                  * so as not to overwrite data in dest buffer
1316                  */
1317                 src_buf_start =
1318                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
1319                 dst_buf_start =
1320                         rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
1321
1322         } else {
1323                 /* In-place operation
1324                  * Start DMA at nearest aligned address below min_ofs
1325                  */
1326                 src_buf_start =
1327                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
1328                                                 & QAT_64_BTYE_ALIGN_MASK;
1329
1330                 if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
1331                                         rte_pktmbuf_headroom(op->sym->m_src))
1332                                                         > src_buf_start)) {
1333                         /* aligning down moved the address before the start
1334                          * of the mbuf, so revert and take the performance hit
1335                          */
1336                         src_buf_start =
1337                                 rte_pktmbuf_mtophys_offset(op->sym->m_src,
1338                                                                 min_ofs);
1339                 }
1340                 dst_buf_start = src_buf_start;
1341         }
1342
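             /* cipher/auth offsets below are relative to the DMA start
              * address chosen above
              */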
1343         if (do_cipher || do_aead) {
1344                 cipher_param->cipher_offset =
1345                                 (uint32_t)rte_pktmbuf_mtophys_offset(
1346                                 op->sym->m_src, cipher_ofs) - src_buf_start;
1347                 cipher_param->cipher_length = cipher_len;
1348         } else {
1349                 cipher_param->cipher_offset = 0;
1350                 cipher_param->cipher_length = 0;
1351         }
1352
1353         if (do_auth || do_aead) {
1354                 auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
1355                                 op->sym->m_src, auth_ofs) - src_buf_start;
1356                 auth_param->auth_len = auth_len;
1357         } else {
1358                 auth_param->auth_off = 0;
1359                 auth_param->auth_len = 0;
1360         }
1361
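             /* the DMA length must reach the end of whichever of the
              * cipher and auth regions finishes later
              */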
1362         qat_req->comn_mid.dst_length =
1363                 qat_req->comn_mid.src_length =
1364                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1365                 > (auth_param->auth_off + auth_param->auth_len) ?
1366                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1367                 : (auth_param->auth_off + auth_param->auth_len);
1368
1369         if (do_sgl) {
1370
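                     /* flag the request as scatter-gather and build the
                      * source (and, if out-of-place, destination) SGLs
                      */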
1371                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1372                                 QAT_COMN_PTR_TYPE_SGL);
1373                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1374                                 &qat_op_cookie->qat_sgl_list_src,
1375                                 qat_req->comn_mid.src_length);
1376                 if (ret) {
1377                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1378                         return ret;
1379                 }
1380
1381                 if (likely(op->sym->m_dst == NULL))
1382                         qat_req->comn_mid.dest_data_addr =
1383                                 qat_req->comn_mid.src_data_addr =
1384                                 qat_op_cookie->qat_sgl_src_phys_addr;
1385                 else {
1386                         ret = qat_sgl_fill_array(op->sym->m_dst,
1387                                         dst_buf_start,
1388                                         &qat_op_cookie->qat_sgl_list_dst,
1389                                                 qat_req->comn_mid.dst_length);
1390
1391                         if (ret) {
1392                                 PMD_DRV_LOG(ERR,
1393                                         "QAT PMD Cannot fill sgl array");
1394                                 return ret;
1395                         }
1396
1397                         qat_req->comn_mid.src_data_addr =
1398                                 qat_op_cookie->qat_sgl_src_phys_addr;
1399                         qat_req->comn_mid.dest_data_addr =
1400                                         qat_op_cookie->qat_sgl_dst_phys_addr;
1401                 }
1402         } else {
1403                 qat_req->comn_mid.src_data_addr = src_buf_start;
1404                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1405         }
1406
1407 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1408         rte_hexdump(stdout, "qat_req:", qat_req,
1409                         sizeof(struct icp_qat_fw_la_bulk_req));
1410         rte_hexdump(stdout, "src_data:",
1411                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t *),
1412                         rte_pktmbuf_data_len(op->sym->m_src));
1413         if (do_cipher) {
1414                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
1415                                                 uint8_t *,
1416                                                 ctx->cipher_iv.offset);
1417                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
1418                                 ctx->cipher_iv.length);
1419         }
1420
1421         if (do_auth) {
1422                 if (ctx->auth_iv.length) {
1423                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
1424                                                         uint8_t *,
1425                                                         ctx->auth_iv.offset);
1426                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
1427                                                 ctx->auth_iv.length);
1428                 }
1429                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1430                                 ctx->digest_length);
1431         }
1432
1433         if (do_aead) {
1434                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
1435                                 ctx->digest_length);
1436                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
1437                                 ctx->aad_len);
1438         }
1439 #endif
1440         return 0;
1441 }
1442
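     /*
      * Cheap modulo for power-of-two divisors:
      * adf_modulo(data, shift) == data % (1 << shift),
      * e.g. adf_modulo(x, 10) == x % 1024.
      */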
1443 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1444 {
1445         uint32_t div = data >> shift;
1446         uint32_t mult = div << shift;
1447
1448         return data - mult;
1449 }
1450
1451 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1452                 __rte_unused struct rte_cryptodev_config *config)
1453 {
1454         PMD_INIT_FUNC_TRACE();
1455         return 0;
1456 }
1457
1458 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1459 {
1460         PMD_INIT_FUNC_TRACE();
1461         return 0;
1462 }
1463
1464 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1465 {
1466         PMD_INIT_FUNC_TRACE();
1467 }
1468
1469 int qat_dev_close(struct rte_cryptodev *dev)
1470 {
1471         int i, ret;
1472
1473         PMD_INIT_FUNC_TRACE();
1474
1475         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1476                 ret = qat_crypto_sym_qp_release(dev, i);
1477                 if (ret < 0)
1478                         return ret;
1479         }
1480
1481         return 0;
1482 }
1483
1484 void qat_dev_info_get(struct rte_cryptodev *dev,
1485                         struct rte_cryptodev_info *info)
1486 {
1487         struct qat_pmd_private *internals = dev->data->dev_private;
1488
1489         PMD_INIT_FUNC_TRACE();
1490         if (info != NULL) {
1491                 info->max_nb_queue_pairs =
1492                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
1493                                 ADF_NUM_BUNDLES_PER_DEV;
1494                 info->feature_flags = dev->feature_flags;
1495                 info->capabilities = internals->qat_dev_capabilities;
1496                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1497                 info->driver_id = cryptodev_qat_driver_id;
1498                 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1499         }
1500 }
1501
1502 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1503                 struct rte_cryptodev_stats *stats)
1504 {
1505         int i;
1506         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1507
1508         PMD_INIT_FUNC_TRACE();
1509         if (stats == NULL) {
1510                 PMD_DRV_LOG(ERR, "invalid stats pointer NULL");
1511                 return;
1512         }
1513         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1514                 if (qp[i] == NULL) {
1515                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1516                         continue;
1517                 }
1518
1519                 stats->enqueued_count += qp[i]->stats.enqueued_count;
1520                 stats->dequeued_count += qp[i]->stats.dequeued_count;
1521                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1522                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1523         }
1524 }
1525
1526 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1527 {
1528         int i;
1529         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1530
1531         PMD_INIT_FUNC_TRACE();
1532         for (i = 0; i < dev->data->nb_queue_pairs; i++)
1533                 if (qp[i] != NULL) /* skip uninitialised queue pairs */
                        memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
1534         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1535 }