[dpdk.git] drivers/crypto/qat/qat_crypto.c @ commit 9678a7024df3f154f76f0d3e88a875e72c1ec31a
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *       * Redistributions of source code must retain the above copyright
12  *         notice, this list of conditions and the following disclaimer.
13  *       * Redistributions in binary form must reproduce the above copyright
14  *         notice, this list of conditions and the following disclaimer in
15  *         the documentation and/or other materials provided with the
16  *         distribution.
17  *       * Neither the name of Intel Corporation nor the names of its
18  *         contributors may be used to endorse or promote products derived
19  *         from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <string.h>
38 #include <inttypes.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_tailq.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_launch.h>
52 #include <rte_eal.h>
53 #include <rte_per_lcore.h>
54 #include <rte_lcore.h>
55 #include <rte_atomic.h>
56 #include <rte_branch_prediction.h>
57 #include <rte_mempool.h>
58 #include <rte_mbuf.h>
59 #include <rte_string_fns.h>
60 #include <rte_spinlock.h>
61 #include <rte_hexdump.h>
62 #include <rte_crypto_sym.h>
63 #include <openssl/evp.h>
64
65 #include "qat_logs.h"
66 #include "qat_algs.h"
67 #include "qat_crypto.h"
68 #include "adf_transport_access_macros.h"
69
70 #define BYTE_LENGTH    8
71
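/*
 * Session-build-time helpers: scan this device's capability array (which is
 * terminated by an RTE_CRYPTO_OP_TYPE_UNDEFINED entry) and report whether
 * the requested cipher/auth algorithm is advertised.
 */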
72 static int __rte_unused
73 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
74                 struct qat_pmd_private *internals) {
75         int i = 0;
76         const struct rte_cryptodev_capabilities *capability;
77
78         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
79                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
80                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
81                         continue;
82
83                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
84                         continue;
85
86                 if (capability->sym.cipher.algo == algo)
87                         return 1;
88         }
89         return 0;
90 }
91
92 static int __rte_unused
93 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
94                 struct qat_pmd_private *internals) {
95         int i = 0;
96         const struct rte_cryptodev_capabilities *capability;
97
98         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
99                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
100                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
101                         continue;
102
103                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
104                         continue;
105
106                 if (capability->sym.auth.algo == algo)
107                         return 1;
108         }
109         return 0;
110 }
111
112 /** Encrypt a single partial block
113  *  Depends on openssl libcrypto
114  *  Uses ECB+XOR to do CFB encryption, same result, more performant
115  */
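/*
 * For a single (partial) block CFB reduces to
 *     ciphertext = plaintext  XOR E_key(IV)
 *     plaintext  = ciphertext XOR E_key(IV)
 * so one ECB encryption of the IV followed by an XOR over srclen bytes is
 * equivalent to a full CFB pass, with no block chaining needed. This is why
 * both the encrypt and decrypt helpers below call EVP_EncryptUpdate().
 */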
116 static inline int
117 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
118                 uint8_t *iv, int ivlen, int srclen,
119                 void *bpi_ctx)
120 {
121         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
122         int encrypted_ivlen;
123         uint8_t encrypted_iv[16];
124         int i;
125
126         /* ECB method: encrypt the IV, then XOR this with plaintext */
127         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
128                                                                 <= 0)
129                 goto cipher_encrypt_err;
130
131         for (i = 0; i < srclen; i++)
132                 *(dst+i) = *(src+i)^(encrypted_iv[i]);
133
134         return 0;
135
136 cipher_encrypt_err:
137         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
138         return -EINVAL;
139 }
140
141 /** Decrypt a single partial block
142  *  Depends on openssl libcrypto
143  *  Uses ECB+XOR to do CFB decryption; same result, more performant
144  */
145 static inline int
146 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
147                 uint8_t *iv, int ivlen, int srclen,
148                 void *bpi_ctx)
149 {
150         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
151         int encrypted_ivlen;
152         uint8_t encrypted_iv[16];
153         int i;
154
155         /* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
156         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
157                                                                 <= 0)
158                 goto cipher_decrypt_err;
159
160         for (i = 0; i < srclen; i++)
161                 *(dst+i) = *(src+i)^(encrypted_iv[i]);
162
163         return 0;
164
165 cipher_decrypt_err:
166         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
167         return -EINVAL;
168 }
169
170 /** Creates a context in either AES or DES in ECB mode
171  *  Depends on openssl libcrypto
172  */
173 static void *
174 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
175                 enum rte_crypto_cipher_operation direction __rte_unused,
176                                         uint8_t *key)
177 {
178         const EVP_CIPHER *algo = NULL;
179         EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
180
181         if (ctx == NULL)
182                 goto ctx_init_err;
183
184         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
185                 algo = EVP_des_ecb();
186         else
187                 algo = EVP_aes_128_ecb();
188
189         /* IV will be ECB encrypted whether direction is encrypt or decrypt */
190         if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
191                 goto ctx_init_err;
192
193         return ctx;
194
195 ctx_init_err:
196         if (ctx != NULL)
197                 EVP_CIPHER_CTX_free(ctx);
198         return NULL;
199 }
200
201 /** Frees a context previously created
202  *  Depends on openssl libcrypto
203  */
204 static void
205 bpi_cipher_ctx_free(void *bpi_ctx)
206 {
207         if (bpi_ctx != NULL)
208                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
209 }
210
211 static inline uint32_t
212 adf_modulo(uint32_t data, uint32_t shift);
213
214 static inline int
215 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
216                 struct qat_crypto_op_cookie *qat_op_cookie);
217
218 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
219                 void *session)
220 {
221         struct qat_session *sess = session;
222         phys_addr_t cd_paddr;
223
224         PMD_INIT_FUNC_TRACE();
225         if (sess) {
226                 if (sess->bpi_ctx) {
227                         bpi_cipher_ctx_free(sess->bpi_ctx);
228                         sess->bpi_ctx = NULL;
229                 }
230                 cd_paddr = sess->cd_paddr;
231                 memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
232                 sess->cd_paddr = cd_paddr;
233         } else
234                 PMD_DRV_LOG(ERR, "NULL session");
235 }
236
237 static int
238 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
239 {
240         /* Cipher Only */
241         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
242                 return ICP_QAT_FW_LA_CMD_CIPHER;
243
244         /* Authentication Only */
245         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
246                 return ICP_QAT_FW_LA_CMD_AUTH;
247
248         if (xform->next == NULL)
249                 return -1;
250
251         /* Cipher then Authenticate */
252         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
253                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
254                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
255
256         /* Authenticate then Cipher */
257         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
258                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
259                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
260
261         return -1;
262 }
263
264 static struct rte_crypto_auth_xform *
265 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
266 {
267         do {
268                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
269                         return &xform->auth;
270
271                 xform = xform->next;
272         } while (xform);
273
274         return NULL;
275 }
276
277 static struct rte_crypto_cipher_xform *
278 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
279 {
280         do {
281                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
282                         return &xform->cipher;
283
284                 xform = xform->next;
285         } while (xform);
286
287         return NULL;
288 }
289 void *
290 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev __rte_unused,
291                 struct rte_crypto_sym_xform *xform, void *session_private)
292 {
293         struct qat_session *session = session_private;
294
295         struct rte_crypto_cipher_xform *cipher_xform = NULL;
296
297         /* Get cipher xform from crypto xform chain */
298         cipher_xform = qat_get_cipher_xform(xform);
299
300         switch (cipher_xform->algo) {
301         case RTE_CRYPTO_CIPHER_AES_CBC:
302                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
303                                 &session->qat_cipher_alg) != 0) {
304                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
305                         goto error_out;
306                 }
307                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
308                 break;
309         case RTE_CRYPTO_CIPHER_AES_GCM:
310                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
311                                 &session->qat_cipher_alg) != 0) {
312                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
313                         goto error_out;
314                 }
315                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
316                 break;
317         case RTE_CRYPTO_CIPHER_AES_CTR:
318                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
319                                 &session->qat_cipher_alg) != 0) {
320                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
321                         goto error_out;
322                 }
323                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
324                 break;
325         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
326                 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
327                                         &session->qat_cipher_alg) != 0) {
328                         PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
329                         goto error_out;
330                 }
331                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
332                 break;
333         case RTE_CRYPTO_CIPHER_NULL:
334                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
335                 break;
336         case RTE_CRYPTO_CIPHER_KASUMI_F8:
337                 if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
338                                         &session->qat_cipher_alg) != 0) {
339                         PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
340                         goto error_out;
341                 }
342                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
343                 break;
344         case RTE_CRYPTO_CIPHER_3DES_CBC:
345                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
346                                 &session->qat_cipher_alg) != 0) {
347                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
348                         goto error_out;
349                 }
350                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
351                 break;
352         case RTE_CRYPTO_CIPHER_DES_CBC:
353                 if (qat_alg_validate_des_key(cipher_xform->key.length,
354                                 &session->qat_cipher_alg) != 0) {
355                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
356                         goto error_out;
357                 }
358                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
359                 break;
360         case RTE_CRYPTO_CIPHER_3DES_CTR:
361                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
362                                 &session->qat_cipher_alg) != 0) {
363                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
364                         goto error_out;
365                 }
366                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
367                 break;
368         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
369                 session->bpi_ctx = bpi_cipher_ctx_init(
370                                         cipher_xform->algo,
371                                         cipher_xform->op,
372                                         cipher_xform->key.data);
373                 if (session->bpi_ctx == NULL) {
374                         PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
375                         goto error_out;
376                 }
377                 if (qat_alg_validate_des_key(cipher_xform->key.length,
378                                 &session->qat_cipher_alg) != 0) {
379                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
380                         goto error_out;
381                 }
382                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
383                 break;
384         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
385                 session->bpi_ctx = bpi_cipher_ctx_init(
386                                         cipher_xform->algo,
387                                         cipher_xform->op,
388                                         cipher_xform->key.data);
389                 if (session->bpi_ctx == NULL) {
390                         PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
391                         goto error_out;
392                 }
393                 if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
394                                 &session->qat_cipher_alg) != 0) {
395                         PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
396                         goto error_out;
397                 }
398                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
399                 break;
400         case RTE_CRYPTO_CIPHER_3DES_ECB:
401         case RTE_CRYPTO_CIPHER_AES_ECB:
402         case RTE_CRYPTO_CIPHER_AES_CCM:
403         case RTE_CRYPTO_CIPHER_AES_F8:
404         case RTE_CRYPTO_CIPHER_AES_XTS:
405         case RTE_CRYPTO_CIPHER_ARC4:
406         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
407                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
408                                 cipher_xform->algo);
409                 goto error_out;
410         default:
411                 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
412                                 cipher_xform->algo);
413                 goto error_out;
414         }
415
416         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
417                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
418         else
419                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
420
421         if (qat_alg_aead_session_create_content_desc_cipher(session,
422                                                 cipher_xform->key.data,
423                                                 cipher_xform->key.length))
424                 goto error_out;
425
426         return session;
427
428 error_out:
429         if (session->bpi_ctx) {
430                 bpi_cipher_ctx_free(session->bpi_ctx);
431                 session->bpi_ctx = NULL;
432         }
433         return NULL;
434 }
435
436
437 void *
438 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
439                 struct rte_crypto_sym_xform *xform, void *session_private)
440 {
441         struct qat_session *session = session_private;
442
443         int qat_cmd_id;
444         PMD_INIT_FUNC_TRACE();
445
446         /* Get requested QAT command id */
447         qat_cmd_id = qat_get_cmd_id(xform);
448         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
449                 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
450                 goto error_out;
451         }
452         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
453         switch (session->qat_cmd) {
454         case ICP_QAT_FW_LA_CMD_CIPHER:
455         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
456                 break;
457         case ICP_QAT_FW_LA_CMD_AUTH:
458         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
459                 break;
460         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
461         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
462         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
463                 break;
464         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
465         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
466         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
467                 break;
468         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
469         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
470         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
471         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
472         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
473         case ICP_QAT_FW_LA_CMD_MGF1:
474         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
475         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
476         case ICP_QAT_FW_LA_CMD_DELIMITER:
477         PMD_DRV_LOG(ERR, "Unsupported Service %u",
478                 session->qat_cmd);
479                 goto error_out;
480         default:
481         PMD_DRV_LOG(ERR, "Unsupported Service %u",
482                 session->qat_cmd);
483                 goto error_out;
484         }
485
486         return session;
487
488 error_out:
489         return NULL;
490 }
491
492 struct qat_session *
493 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev __rte_unused,
494                                 struct rte_crypto_sym_xform *xform,
495                                 struct qat_session *session_private)
496 {
497
498         struct qat_session *session = session_private;
499         struct rte_crypto_auth_xform *auth_xform = NULL;
500         struct rte_crypto_cipher_xform *cipher_xform = NULL;
501         auth_xform = qat_get_auth_xform(xform);
502
503         switch (auth_xform->algo) {
504         case RTE_CRYPTO_AUTH_SHA1_HMAC:
505                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
506                 break;
507         case RTE_CRYPTO_AUTH_SHA224_HMAC:
508                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
509                 break;
510         case RTE_CRYPTO_AUTH_SHA256_HMAC:
511                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
512                 break;
513         case RTE_CRYPTO_AUTH_SHA384_HMAC:
514                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
515                 break;
516         case RTE_CRYPTO_AUTH_SHA512_HMAC:
517                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
518                 break;
519         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
520                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
521                 break;
522         case RTE_CRYPTO_AUTH_AES_GCM:
523                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
524                 break;
525         case RTE_CRYPTO_AUTH_AES_GMAC:
526                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
527                 break;
528         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
529                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
530                 break;
531         case RTE_CRYPTO_AUTH_MD5_HMAC:
532                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
533                 break;
534         case RTE_CRYPTO_AUTH_NULL:
535                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
536                 break;
537         case RTE_CRYPTO_AUTH_KASUMI_F9:
538                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
539                 break;
540         case RTE_CRYPTO_AUTH_SHA1:
541         case RTE_CRYPTO_AUTH_SHA256:
542         case RTE_CRYPTO_AUTH_SHA512:
543         case RTE_CRYPTO_AUTH_SHA224:
544         case RTE_CRYPTO_AUTH_SHA384:
545         case RTE_CRYPTO_AUTH_MD5:
546         case RTE_CRYPTO_AUTH_AES_CCM:
547         case RTE_CRYPTO_AUTH_AES_CMAC:
548         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
549         case RTE_CRYPTO_AUTH_ZUC_EIA3:
550                 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
551                                 auth_xform->algo);
552                 goto error_out;
553         default:
554                 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
555                                 auth_xform->algo);
556                 goto error_out;
557         }
558         cipher_xform = qat_get_cipher_xform(xform);
559
560         if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
561                         (session->qat_hash_alg ==
562                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_64))  {
563                 if (qat_alg_aead_session_create_content_desc_auth(session,
564                                 cipher_xform->key.data,
565                                 cipher_xform->key.length,
566                                 auth_xform->add_auth_data_length,
567                                 auth_xform->digest_length,
568                                 auth_xform->op))
569                         goto error_out;
570         } else {
571                 if (qat_alg_aead_session_create_content_desc_auth(session,
572                                 auth_xform->key.data,
573                                 auth_xform->key.length,
574                                 auth_xform->add_auth_data_length,
575                                 auth_xform->digest_length,
576                                 auth_xform->op))
577                         goto error_out;
578         }
579         return session;
580
581 error_out:
582         return NULL;
583 }
584
585 unsigned qat_crypto_sym_get_session_private_size(
586                 struct rte_cryptodev *dev __rte_unused)
587 {
588         return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
589 }
590
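/*
 * DOCSIS BPI pre-processing. The QAT device is given only whole cipher
 * blocks (DES/AES CBC); a trailing partial block is handled here in
 * software with the CFB-equivalent helpers above. For decryption the
 * partial block is decrypted in place (or into m_dst for out-of-place ops)
 * before enqueue, and the return value is the length of the full-block
 * region that hardware should process.
 */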
591 static inline uint32_t
592 qat_bpicipher_preprocess(struct qat_session *ctx,
593                                 struct rte_crypto_op *op)
594 {
595         uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
596         struct rte_crypto_sym_op *sym_op = op->sym;
597         uint8_t last_block_len = sym_op->cipher.data.length % block_len;
598
599         if (last_block_len &&
600                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
601
602                 /* Decrypt last block */
603                 uint8_t *last_block, *dst, *iv;
604                 uint32_t last_block_offset = sym_op->cipher.data.offset +
605                                 sym_op->cipher.data.length - last_block_len;
606                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
607                                 uint8_t *, last_block_offset);
608
609                 if (unlikely(sym_op->m_dst != NULL))
610                         /* out-of-place operation (OOP) */
611                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
612                                                 uint8_t *, last_block_offset);
613                 else
614                         dst = last_block;
615
616                 if (last_block_len < sym_op->cipher.data.length)
617                         /* use previous block ciphertext as IV */
618                         iv = last_block - block_len;
619                 else
620                         /* runt block, i.e. less than one full block */
621                         iv = sym_op->cipher.iv.data;
622
623 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
624                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
625                         last_block_len);
626                 if (sym_op->m_dst != NULL)
627                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
628                                 last_block_len);
629 #endif
630                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
631                                 last_block_len, ctx->bpi_ctx);
632 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
633                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
634                         last_block_len);
635                 if (sym_op->m_dst != NULL)
636                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
637                                 last_block_len);
638 #endif
639         }
640
641         return sym_op->cipher.data.length - last_block_len;
642 }
643
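/*
 * DOCSIS BPI post-processing, the encrypt-side counterpart of the function
 * above, run on dequeue. Hardware has already CBC-encrypted the full
 * blocks; the trailing partial block is CFB-encrypted here, using the last
 * full block of ciphertext (or the original IV for a runt buffer) as IV.
 */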
644 static inline uint32_t
645 qat_bpicipher_postprocess(struct qat_session *ctx,
646                                 struct rte_crypto_op *op)
647 {
648         uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
649         struct rte_crypto_sym_op *sym_op = op->sym;
650         uint8_t last_block_len = sym_op->cipher.data.length % block_len;
651
652         if (last_block_len > 0 &&
653                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
654
655                 /* Encrypt last block */
656                 uint8_t *last_block, *dst, *iv;
657                 uint32_t last_block_offset;
658
659                 last_block_offset = sym_op->cipher.data.offset +
660                                 sym_op->cipher.data.length - last_block_len;
661                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
662                                 uint8_t *, last_block_offset);
663
664                 if (unlikely(sym_op->m_dst != NULL))
665                         /* out-of-place operation (OOP) */
666                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
667                                                 uint8_t *, last_block_offset);
668                 else
669                         dst = last_block;
670
671                 if (last_block_len < sym_op->cipher.data.length)
672                         /* use previous block ciphertext as IV */
673                         iv = dst - block_len;
674                 else
675                         /* runt block, i.e. less than one full block */
676                         iv = sym_op->cipher.iv.data;
677
678 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
679                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
680                         last_block_len);
681                 if (sym_op->m_dst != NULL)
682                         rte_hexdump(stdout, "BPI: dst before post-process:",
683                                         dst, last_block_len);
684 #endif
685                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
686                                 last_block_len, ctx->bpi_ctx);
687 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
688                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
689                         last_block_len);
690                 if (sym_op->m_dst != NULL)
691                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
692                                 last_block_len);
693 #endif
694         }
695         return sym_op->cipher.data.length - last_block_len;
696 }
697
698 uint16_t
699 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
700                 uint16_t nb_ops)
701 {
702         register struct qat_queue *queue;
703         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
704         register uint32_t nb_ops_sent = 0;
705         register struct rte_crypto_op **cur_op = ops;
706         register int ret;
707         uint16_t nb_ops_possible = nb_ops;
708         register uint8_t *base_addr;
709         register uint32_t tail;
710         int overflow;
711
712         if (unlikely(nb_ops == 0))
713                 return 0;
714
715         /* read params used a lot in main loop into registers */
716         queue = &(tmp_qp->tx_q);
717         base_addr = (uint8_t *)queue->base_addr;
718         tail = queue->tail;
719
720         /* Find how many can actually fit on the ring */
721         overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
722                                 - queue->max_inflights;
723         if (overflow > 0) {
724                 rte_atomic16_sub(&tmp_qp->inflights16, overflow);
725                 nb_ops_possible = nb_ops - overflow;
726                 if (nb_ops_possible == 0)
727                         return 0;
728         }
729
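        /*
         * Each ring slot has an op cookie (indexed by tail / msg_size)
         * providing DMA-able memory for the per-op SGL tables built in
         * qat_write_hw_desc_entry().
         */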
730         while (nb_ops_sent != nb_ops_possible) {
731                 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
732                                 tmp_qp->op_cookies[tail / queue->msg_size]);
733                 if (ret != 0) {
734                         tmp_qp->stats.enqueue_err_count++;
735                         /*
736                          * This message cannot be enqueued,
737                          * decrease the number of ops that were not sent
738                          */
739                         rte_atomic16_sub(&tmp_qp->inflights16,
740                                         nb_ops_possible - nb_ops_sent);
741                         if (nb_ops_sent == 0)
742                                 return 0;
743                         goto kick_tail;
744                 }
745
746                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
747                 nb_ops_sent++;
748                 cur_op++;
749         }
750 kick_tail:
751         WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
752                         queue->hw_queue_number, tail);
753         queue->tail = tail;
754         tmp_qp->stats.enqueued_count += nb_ops_sent;
755         return nb_ops_sent;
756 }
757
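/*
 * Dequeue burst: poll the response ring until nb_ops responses are
 * gathered or an ADF_RING_EMPTY_SIG slot is hit. The firmware status is
 * mapped to an RTE_CRYPTO_OP_STATUS_* value, DOCSIS BPI sessions get their
 * trailing partial block encrypted in qat_bpicipher_postprocess(), and the
 * ring head CSR and shared inflights16 counter are updated once at the end.
 */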
758 uint16_t
759 qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
760                 uint16_t nb_ops)
761 {
762         struct qat_queue *queue;
763         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
764         uint32_t msg_counter = 0;
765         struct rte_crypto_op *rx_op;
766         struct icp_qat_fw_comn_resp *resp_msg;
767
768         queue = &(tmp_qp->rx_q);
769         resp_msg = (struct icp_qat_fw_comn_resp *)
770                         ((uint8_t *)queue->base_addr + queue->head);
771
772         while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
773                         msg_counter != nb_ops) {
774                 rx_op = (struct rte_crypto_op *)(uintptr_t)
775                                 (resp_msg->opaque_data);
776
777 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
778                 rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
779                         sizeof(struct icp_qat_fw_comn_resp));
780 #endif
781                 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
782                                 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
783                                         resp_msg->comn_hdr.comn_status)) {
784                         rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
785                 } else {
786                         struct qat_session *sess = (struct qat_session *)
787                                                 (rx_op->sym->session->_private);
788                         if (sess->bpi_ctx)
789                                 qat_bpicipher_postprocess(sess, rx_op);
790                         rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
791                 }
792
793                 *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
794                 queue->head = adf_modulo(queue->head +
795                                 queue->msg_size,
796                                 ADF_RING_SIZE_MODULO(queue->queue_size));
797                 resp_msg = (struct icp_qat_fw_comn_resp *)
798                                         ((uint8_t *)queue->base_addr +
799                                                         queue->head);
800                 *ops = rx_op;
801                 ops++;
802                 msg_counter++;
803         }
804         if (msg_counter > 0) {
805                 WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
806                                         queue->hw_bundle_number,
807                                         queue->hw_queue_number, queue->head);
808                 rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
809                 tmp_qp->stats.dequeued_count += msg_counter;
810         }
811         return msg_counter;
812 }
813
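/*
 * Build the QAT scatter-gather list for a (possibly chained) mbuf. The
 * first entry starts at buff_start, which may lie part-way into the first
 * segment; further segments are appended until data_len bytes are covered,
 * and the final entry is trimmed so the list describes exactly data_len
 * bytes. Fails if more than QAT_SGL_MAX_NUMBER entries would be needed.
 */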
814 static inline int
815 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
816                 struct qat_alg_buf_list *list, uint32_t data_len)
817 {
818         int nr = 1;
819
820         uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
821                         buff_start + rte_pktmbuf_data_len(buf);
822
823         list->bufers[0].addr = buff_start;
824         list->bufers[0].resrvd = 0;
825         list->bufers[0].len = buf_len;
826
827         if (data_len <= buf_len) {
828                 list->num_bufs = nr;
829                 list->bufers[0].len = data_len;
830                 return 0;
831         }
832
833         buf = buf->next;
834         while (buf) {
835                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
836                         PMD_DRV_LOG(ERR, "QAT PMD exceeded max number of"
837                                         " QAT SGL entries (%u)",
838                                         QAT_SGL_MAX_NUMBER);
839                         return -EINVAL;
840                 }
841
842                 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
843                 list->bufers[nr].resrvd = 0;
844                 list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
845
846                 buf_len += list->bufers[nr].len;
847                 buf = buf->next;
848
849                 if (buf_len > data_len) {
850                         list->bufers[nr].len -=
851                                 buf_len - data_len;
852                         buf = NULL;
853                 }
854                 ++nr;
855         }
856         list->num_bufs = nr;
857
858         return 0;
859 }
860
861 static inline int
862 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
863                 struct qat_crypto_op_cookie *qat_op_cookie)
864 {
865         int ret = 0;
866         struct qat_session *ctx;
867         struct icp_qat_fw_la_cipher_req_params *cipher_param;
868         struct icp_qat_fw_la_auth_req_params *auth_param;
869         register struct icp_qat_fw_la_bulk_req *qat_req;
870         uint8_t do_auth = 0, do_cipher = 0;
871         uint32_t cipher_len = 0, cipher_ofs = 0;
872         uint32_t auth_len = 0, auth_ofs = 0;
873         uint32_t min_ofs = 0;
874         uint64_t src_buf_start = 0, dst_buf_start = 0;
875         uint8_t do_sgl = 0;
876
877
878 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
879         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
880                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
881                                 "operation requests, op (%p) is not a "
882                                 "symmetric operation.", op);
883                 return -EINVAL;
884         }
885 #endif
886         if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
887                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
888                                 " requests, op (%p) is sessionless.", op);
889                 return -EINVAL;
890         }
891
892         if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
893                 PMD_DRV_LOG(ERR, "Session was not created for this device");
894                 return -EINVAL;
895         }
896
897         ctx = (struct qat_session *)op->sym->session->_private;
898         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
899         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
900         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
901         cipher_param = (void *)&qat_req->serv_specif_rqpars;
902         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
903
904         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
905                 ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
906                 do_auth = 1;
907                 do_cipher = 1;
908         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
909                 do_auth = 1;
910                 do_cipher = 0;
911         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
912                 do_auth = 0;
913                 do_cipher = 1;
914         }
915
916         if (do_cipher) {
917
918                 if (ctx->qat_cipher_alg ==
919                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
920                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
921
922                         if (unlikely(
923                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
924                                  || (cipher_param->cipher_offset
925                                                         % BYTE_LENGTH != 0))) {
926                                 PMD_DRV_LOG(ERR,
927                   "SNOW3G/KASUMI in QAT PMD only supports byte aligned values");
928                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
929                                 return -EINVAL;
930                         }
931                         cipher_len = op->sym->cipher.data.length >> 3;
932                         cipher_ofs = op->sym->cipher.data.offset >> 3;
933
934                 } else if (ctx->bpi_ctx) {
935                         /* DOCSIS - only send complete blocks to device
936                          * Process any partial block using CFB mode.
937                          * Even if 0 complete blocks, still send this to device
938                          * to get into rx queue for post-process and dequeuing
939                          */
940                         cipher_len = qat_bpicipher_preprocess(ctx, op);
941                         cipher_ofs = op->sym->cipher.data.offset;
942                 } else {
943                         cipher_len = op->sym->cipher.data.length;
944                         cipher_ofs = op->sym->cipher.data.offset;
945                 }
946
947                 /* copy IV into request if it fits, else pass its phys addr */
948                 if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
949                                 sizeof(cipher_param->u.cipher_IV_array))) {
950                         rte_memcpy(cipher_param->u.cipher_IV_array,
951                                         op->sym->cipher.iv.data,
952                                         op->sym->cipher.iv.length);
953                 } else {
954                         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
955                                         qat_req->comn_hdr.serv_specif_flags,
956                                         ICP_QAT_FW_CIPH_IV_64BIT_PTR);
957                         cipher_param->u.s.cipher_IV_ptr =
958                                         op->sym->cipher.iv.phys_addr;
959                 }
960                 min_ofs = cipher_ofs;
961         }
962
963         if (do_auth) {
964
965                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
966                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
967                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
968                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
969                                 PMD_DRV_LOG(ERR,
970                 "For SNOW3G/KASUMI, QAT PMD only supports byte aligned values");
971                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
972                                 return -EINVAL;
973                         }
974                         auth_ofs = op->sym->auth.data.offset >> 3;
975                         auth_len = op->sym->auth.data.length >> 3;
976
977                         if (ctx->qat_hash_alg ==
978                                         ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
979                                 if (do_cipher) {
980                                         auth_len = auth_len + auth_ofs + 1 -
981                                                 ICP_QAT_HW_KASUMI_BLK_SZ;
982                                         auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
983                                 } else {
984                                         auth_len = auth_len + auth_ofs + 1;
985                                         auth_ofs = 0;
986                                 }
987                         }
988
989                 } else {
990                         auth_ofs = op->sym->auth.data.offset;
991                         auth_len = op->sym->auth.data.length;
992                 }
993                 min_ofs = auth_ofs;
994
995                 auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
996
997                 auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
998
999         }
1000
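        /*
         * A chained mbuf on either the source or destination side forces
         * SGL descriptors. min_ofs (the smaller of the cipher and auth
         * offsets when both are present) is used below to choose the DMA
         * start address.
         */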
1001         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1002                 do_sgl = 1;
1003
1004         /* adjust for chain case */
1005         if (do_cipher && do_auth)
1006                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1007
1008         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1009                 min_ofs = 0;
1010
1011         if (unlikely(op->sym->m_dst != NULL)) {
1012                 /* Out-of-place operation (OOP)
1013                  * Don't align DMA start. DMA the minimum data-set
1014                  * so as not to overwrite data in dest buffer
1015                  */
1016                 src_buf_start =
1017                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
1018                 dst_buf_start =
1019                         rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
1020
1021         } else {
1022                 /* In-place operation
1023                  * Start DMA at nearest aligned address below min_ofs
1024                  */
1025                 src_buf_start =
1026                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
1027                                                 & QAT_64_BTYE_ALIGN_MASK;
1028
1029                 if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
1030                                         rte_pktmbuf_headroom(op->sym->m_src))
1031                                                         > src_buf_start)) {
1032                         /* alignment has pushed addr ahead of start of mbuf
1033                          * so revert and take the performance hit
1034                          */
1035                         src_buf_start =
1036                                 rte_pktmbuf_mtophys_offset(op->sym->m_src,
1037                                                                 min_ofs);
1038                 }
1039                 dst_buf_start = src_buf_start;
1040         }
1041
1042         if (do_cipher) {
1043                 cipher_param->cipher_offset =
1044                                 (uint32_t)rte_pktmbuf_mtophys_offset(
1045                                 op->sym->m_src, cipher_ofs) - src_buf_start;
1046                 cipher_param->cipher_length = cipher_len;
1047         } else {
1048                 cipher_param->cipher_offset = 0;
1049                 cipher_param->cipher_length = 0;
1050         }
1051         if (do_auth) {
1052                 auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
1053                                 op->sym->m_src, auth_ofs) - src_buf_start;
1054                 auth_param->auth_len = auth_len;
1055         } else {
1056                 auth_param->auth_off = 0;
1057                 auth_param->auth_len = 0;
1058         }
1059         qat_req->comn_mid.dst_length =
1060                 qat_req->comn_mid.src_length =
1061                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1062                 > (auth_param->auth_off + auth_param->auth_len) ?
1063                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1064                 : (auth_param->auth_off + auth_param->auth_len);
1065
1066         if (do_sgl) {
1067
1068                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1069                                 QAT_COMN_PTR_TYPE_SGL);
1070                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1071                                 &qat_op_cookie->qat_sgl_list_src,
1072                                 qat_req->comn_mid.src_length);
1073                 if (ret) {
1074                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1075                         return ret;
1076                 }
1077
1078                 if (likely(op->sym->m_dst == NULL))
1079                         qat_req->comn_mid.dest_data_addr =
1080                                 qat_req->comn_mid.src_data_addr =
1081                                 qat_op_cookie->qat_sgl_src_phys_addr;
1082                 else {
1083                         ret = qat_sgl_fill_array(op->sym->m_dst,
1084                                         dst_buf_start,
1085                                         &qat_op_cookie->qat_sgl_list_dst,
1086                                                 qat_req->comn_mid.dst_length);
1087
1088                         if (ret) {
1089                                 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
1090                                                 "fill sgl array");
1091                                 return ret;
1092                         }
1093
1094                         qat_req->comn_mid.src_data_addr =
1095                                 qat_op_cookie->qat_sgl_src_phys_addr;
1096                         qat_req->comn_mid.dest_data_addr =
1097                                         qat_op_cookie->qat_sgl_dst_phys_addr;
1098                 }
1099         } else {
1100                 qat_req->comn_mid.src_data_addr = src_buf_start;
1101                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1102         }
1103
1104         if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1105                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1106                 if (op->sym->cipher.iv.length == 12) {
1107                         /*
1108                          * For GCM a 12 byte IV is allowed,
1109                          * but we need to inform the f/w
1110                          */
1111                         ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1112                                 qat_req->comn_hdr.serv_specif_flags,
1113                                 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1114                 }
1115                 if (op->sym->cipher.data.length == 0) {
1116                         /*
1117                          * GMAC: auth-only, so the AAD buffer becomes the DMA source
1118                          */
1119                         qat_req->comn_mid.dest_data_addr =
1120                                 qat_req->comn_mid.src_data_addr =
1121                                                 op->sym->auth.aad.phys_addr;
1122                         qat_req->comn_mid.dst_length =
1123                                 qat_req->comn_mid.src_length =
1124                                         rte_pktmbuf_data_len(op->sym->m_src);
1125                         cipher_param->cipher_length = 0;
1126                         cipher_param->cipher_offset = 0;
1127                         auth_param->u1.aad_adr = 0;
1128                         auth_param->auth_len = op->sym->auth.aad.length;
1129                         auth_param->auth_off = op->sym->auth.data.offset;
1130                         auth_param->u2.aad_sz = 0;
1131                 }
1132         }
1133
1134 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1135         rte_hexdump(stdout, "qat_req:", qat_req,
1136                         sizeof(struct icp_qat_fw_la_bulk_req));
1137         rte_hexdump(stdout, "src_data:",
1138                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1139                         rte_pktmbuf_data_len(op->sym->m_src));
1140         rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
1141                         op->sym->cipher.iv.length);
1142         rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1143                         op->sym->auth.digest.length);
1144         rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
1145                         op->sym->auth.aad.length);
1146 #endif
1147         return 0;
1148 }
1149
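/*
 * adf_modulo(data, shift) returns data % (1 << shift). Ring sizes are
 * powers of two, so head/tail wrap-around can be done with shift and
 * subtract instead of a division.
 */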
1150 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1151 {
1152         uint32_t div = data >> shift;
1153         uint32_t mult = div << shift;
1154
1155         return data - mult;
1156 }
1157
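/*
 * Pre-compute the physical address of the content descriptor held inside
 * the session: start from the mempool element's physical address and add
 * the offset of the _private area within rte_cryptodev_sym_session plus
 * the offset of the cd field within qat_session, so no address translation
 * is needed on the enqueue path.
 */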
1158 void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
1159 {
1160         struct rte_cryptodev_sym_session *sess = sym_sess;
1161         struct qat_session *s = (void *)sess->_private;
1162
1163         PMD_INIT_FUNC_TRACE();
1164         s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
1165                 offsetof(struct qat_session, cd) +
1166                 offsetof(struct rte_cryptodev_sym_session, _private);
1167 }
1168
1169 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1170                 __rte_unused struct rte_cryptodev_config *config)
1171 {
1172         PMD_INIT_FUNC_TRACE();
1173         return 0;
1174 }
1175
1176 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1177 {
1178         PMD_INIT_FUNC_TRACE();
1179         return 0;
1180 }
1181
1182 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1183 {
1184         PMD_INIT_FUNC_TRACE();
1185 }
1186
1187 int qat_dev_close(struct rte_cryptodev *dev)
1188 {
1189         int i, ret;
1190
1191         PMD_INIT_FUNC_TRACE();
1192
1193         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1194                 ret = qat_crypto_sym_qp_release(dev, i);
1195                 if (ret < 0)
1196                         return ret;
1197         }
1198
1199         return 0;
1200 }
1201
1202 void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
1203                                 struct rte_cryptodev_info *info)
1204 {
1205         struct qat_pmd_private *internals = dev->data->dev_private;
1206
1207         PMD_INIT_FUNC_TRACE();
1208         if (info != NULL) {
1209                 info->max_nb_queue_pairs =
1210                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
1211                                 ADF_NUM_BUNDLES_PER_DEV;
1212                 info->feature_flags = dev->feature_flags;
1213                 info->capabilities = internals->qat_dev_capabilities;
1214                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1215                 info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
1216         }
1217 }
1218
1219 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1220                 struct rte_cryptodev_stats *stats)
1221 {
1222         int i;
1223         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1224
1225         PMD_INIT_FUNC_TRACE();
1226         if (stats == NULL) {
1227                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1228                 return;
1229         }
1230         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1231                 if (qp[i] == NULL) {
1232                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1233                         continue;
1234                 }
1235
1236                 stats->enqueued_count += qp[i]->stats.enqueued_count;
1237                 stats->dequeued_count += qp[i]->stats.dequeued_count;
1238                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1239                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1240         }
1241 }
1242
1243 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1244 {
1245         int i;
1246         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1247
1248         PMD_INIT_FUNC_TRACE();
1249         for (i = 0; i < dev->data->nb_queue_pairs; i++)
1250                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
1251         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1252 }