[dpdk.git] / drivers / crypto / qat / qat_crypto.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <strings.h>
8 #include <string.h>
9 #include <inttypes.h>
10 #include <errno.h>
11 #include <sys/queue.h>
12 #include <stdarg.h>
13
14 #include <rte_common.h>
15 #include <rte_log.h>
16 #include <rte_debug.h>
17 #include <rte_memory.h>
18 #include <rte_tailq.h>
19 #include <rte_malloc.h>
20 #include <rte_launch.h>
21 #include <rte_eal.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_mempool.h>
26 #include <rte_mbuf.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
29 #include <rte_hexdump.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_byteorder.h>
32 #include <rte_pci.h>
33 #include <rte_bus_pci.h>
34
35 #include <openssl/evp.h>
36
37 #include "qat_logs.h"
38 #include "qat_algs.h"
39 #include "qat_crypto.h"
40 #include "adf_transport_access_macros.h"
41
42 #define BYTE_LENGTH    8
43 /* BPI is only used for partial blocks of DES and AES,
44  * so the AES block length can be assumed as the max length for IV, src and dst
45  */
46 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
47
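/** Check whether a cipher algorithm appears in the device's advertised
 *  symmetric capability table (terminated by RTE_CRYPTO_OP_TYPE_UNDEFINED).
 */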
48 static int
49 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
50                 struct qat_pmd_private *internals) {
51         int i = 0;
52         const struct rte_cryptodev_capabilities *capability;
53
54         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
55                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
56                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
57                         continue;
58
59                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
60                         continue;
61
62                 if (capability->sym.cipher.algo == algo)
63                         return 1;
64         }
65         return 0;
66 }
67
68 static int
69 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
70                 struct qat_pmd_private *internals) {
71         int i = 0;
72         const struct rte_cryptodev_capabilities *capability;
73
74         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
75                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
76                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
77                         continue;
78
79                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
80                         continue;
81
82                 if (capability->sym.auth.algo == algo)
83                         return 1;
84         }
85         return 0;
86 }
87
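/* CFB single-block relations used by the two helpers below (a sketch, with
 * E_K denoting the block cipher run in ECB mode):
 *   encrypt:  C = P XOR E_K(IV)
 *   decrypt:  P = C XOR E_K(IV)
 * Only the forward (encrypt) transform of the IV is ever required.
 */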
88 /** Encrypt a single partial block
89  *  Depends on openssl libcrypto
90  *  Uses ECB+XOR to do CFB encryption: same result, but more performant
91  */
92 static inline int
93 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
94                 uint8_t *iv, int ivlen, int srclen,
95                 void *bpi_ctx)
96 {
97         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
98         int encrypted_ivlen;
99         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
100         uint8_t *encr = encrypted_iv;
101
102         /* ECB method: encrypt the IV, then XOR this with plaintext */
103         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
104                                                                 <= 0)
105                 goto cipher_encrypt_err;
106
107         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
108                 *dst = *src ^ *encr;
109
110         return 0;
111
112 cipher_encrypt_err:
113         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
114         return -EINVAL;
115 }
116
117 /** Decrypt a single partial block
118  *  Depends on openssl libcrypto
119  *  Uses ECB+XOR to do CFB decryption: same result, but more performant
120  */
121 static inline int
122 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
123                 uint8_t *iv, int ivlen, int srclen,
124                 void *bpi_ctx)
125 {
126         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
127         int encrypted_ivlen;
128         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
129         uint8_t *encr = encrypted_iv;
130
131         /* ECB method: encrypt (not decrypt!) the IV, then XOR with the ciphertext */
132         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
133                                                                 <= 0)
134                 goto cipher_decrypt_err;
135
136         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
137                 *dst = *src ^ *encr;
138
139         return 0;
140
141 cipher_decrypt_err:
142         PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
143         return -EINVAL;
144 }
145
146 /** Creates a context for either AES or DES in ECB mode
147  *  Depends on openssl libcrypto
148  */
149 static int
150 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
151                 enum rte_crypto_cipher_operation direction __rte_unused,
152                 uint8_t *key, void **ctx)
153 {
154         const EVP_CIPHER *algo = NULL;
155         int ret;
156         *ctx = EVP_CIPHER_CTX_new();
157
158         if (*ctx == NULL) {
159                 ret = -ENOMEM;
160                 goto ctx_init_err;
161         }
162
163         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
164                 algo = EVP_des_ecb();
165         else
166                 algo = EVP_aes_128_ecb();
167
168         /* IV will be ECB-encrypted whether the direction is encrypt or decrypt */
169         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
170                 ret = -EINVAL;
171                 goto ctx_init_err;
172         }
173
174         return 0;
175
176 ctx_init_err:
177         if (*ctx != NULL)
178                 EVP_CIPHER_CTX_free(*ctx);
179         return ret;
180 }
181
182 /** Frees a context previously created
183  *  Depends on openssl libcrypto
184  */
185 static void
186 bpi_cipher_ctx_free(void *bpi_ctx)
187 {
188         if (bpi_ctx != NULL)
189                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
190 }
191
192 static inline uint32_t
193 adf_modulo(uint32_t data, uint32_t shift);
194
195 static inline int
196 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
197                 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
198
199 void
200 qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
201                 struct rte_cryptodev_sym_session *sess)
202 {
203         PMD_INIT_FUNC_TRACE();
204         uint8_t index = dev->driver_id;
205         void *sess_priv = get_session_private_data(sess, index);
206         struct qat_session *s = (struct qat_session *)sess_priv;
207
208         if (sess_priv) {
209                 if (s->bpi_ctx)
210                         bpi_cipher_ctx_free(s->bpi_ctx);
211                 memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
212                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
213                 set_session_private_data(sess, index, NULL);
214                 rte_mempool_put(sess_mp, sess_priv);
215         }
216 }
217
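/** Map a symmetric xform chain to the QAT firmware lookaside (LA)
 *  command id, or -1 if the chain layout is not supported.
 */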
218 static int
219 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
220 {
221         /* Cipher Only */
222         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
223                 return ICP_QAT_FW_LA_CMD_CIPHER;
224
225         /* Authentication Only */
226         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
227                 return ICP_QAT_FW_LA_CMD_AUTH;
228
229         /* AEAD */
230         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
231                 /* AES-GCM and AES-CCM work in different orders:
232                  * GCM first encrypts and then generates the hash, whereas
233                  * AES-CCM first generates the hash and then encrypts.
234                  * A similar relation applies to decryption.
235                  */
236                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
237                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
238                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
239                         else
240                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
241                 else
242                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
243                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
244                         else
245                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
246         }
247
248         if (xform->next == NULL)
249                 return -1;
250
251         /* Cipher then Authenticate */
252         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
253                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
254                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
255
256         /* Authenticate then Cipher */
257         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
258                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
259                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
260
261         return -1;
262 }
263
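/** Return the first AUTH xform in the chain, or NULL if none is present. */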
264 static struct rte_crypto_auth_xform *
265 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
266 {
267         do {
268                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
269                         return &xform->auth;
270
271                 xform = xform->next;
272         } while (xform);
273
274         return NULL;
275 }
276
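/** Return the first CIPHER xform in the chain, or NULL if none is present. */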
277 static struct rte_crypto_cipher_xform *
278 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
279 {
280         do {
281                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
282                         return &xform->cipher;
283
284                 xform = xform->next;
285         } while (xform);
286
287         return NULL;
288 }
289
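/** Parse the cipher xform: record the IV layout, validate the key size,
 *  select the QAT cipher algorithm/mode/direction and build the cipher
 *  part of the session's content descriptor.
 */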
290 int
291 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
292                 struct rte_crypto_sym_xform *xform,
293                 struct qat_session *session)
294 {
295         struct qat_pmd_private *internals = dev->data->dev_private;
296         struct rte_crypto_cipher_xform *cipher_xform = NULL;
297         int ret;
298
299         /* Get cipher xform from crypto xform chain */
300         cipher_xform = qat_get_cipher_xform(xform);
301
302         session->cipher_iv.offset = cipher_xform->iv.offset;
303         session->cipher_iv.length = cipher_xform->iv.length;
304
305         switch (cipher_xform->algo) {
306         case RTE_CRYPTO_CIPHER_AES_CBC:
307                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
308                                 &session->qat_cipher_alg) != 0) {
309                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
310                         ret = -EINVAL;
311                         goto error_out;
312                 }
313                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
314                 break;
315         case RTE_CRYPTO_CIPHER_AES_CTR:
316                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
317                                 &session->qat_cipher_alg) != 0) {
318                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
319                         ret = -EINVAL;
320                         goto error_out;
321                 }
322                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
323                 break;
324         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
325                 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
326                                         &session->qat_cipher_alg) != 0) {
327                         PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
328                         ret = -EINVAL;
329                         goto error_out;
330                 }
331                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
332                 break;
333         case RTE_CRYPTO_CIPHER_NULL:
334                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
335                 break;
336         case RTE_CRYPTO_CIPHER_KASUMI_F8:
337                 if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
338                                         &session->qat_cipher_alg) != 0) {
339                         PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
340                         ret = -EINVAL;
341                         goto error_out;
342                 }
343                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
344                 break;
345         case RTE_CRYPTO_CIPHER_3DES_CBC:
346                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
347                                 &session->qat_cipher_alg) != 0) {
348                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
349                         ret = -EINVAL;
350                         goto error_out;
351                 }
352                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
353                 break;
354         case RTE_CRYPTO_CIPHER_DES_CBC:
355                 if (qat_alg_validate_des_key(cipher_xform->key.length,
356                                 &session->qat_cipher_alg) != 0) {
357                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
358                         ret = -EINVAL;
359                         goto error_out;
360                 }
361                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
362                 break;
363         case RTE_CRYPTO_CIPHER_3DES_CTR:
364                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
365                                 &session->qat_cipher_alg) != 0) {
366                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
367                         ret = -EINVAL;
368                         goto error_out;
369                 }
370                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
371                 break;
372         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
373                 ret = bpi_cipher_ctx_init(
374                                         cipher_xform->algo,
375                                         cipher_xform->op,
376                                         cipher_xform->key.data,
377                                         &session->bpi_ctx);
378                 if (ret != 0) {
379                         PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
380                         goto error_out;
381                 }
382                 if (qat_alg_validate_des_key(cipher_xform->key.length,
383                                 &session->qat_cipher_alg) != 0) {
384                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
385                         ret = -EINVAL;
386                         goto error_out;
387                 }
388                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
389                 break;
390         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
391                 ret = bpi_cipher_ctx_init(
392                                         cipher_xform->algo,
393                                         cipher_xform->op,
394                                         cipher_xform->key.data,
395                                         &session->bpi_ctx);
396                 if (ret != 0) {
397                         PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
398                         goto error_out;
399                 }
400                 if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
401                                 &session->qat_cipher_alg) != 0) {
402                         PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
403                         ret = -EINVAL;
404                         goto error_out;
405                 }
406                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
407                 break;
408         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
409                 if (!qat_is_cipher_alg_supported(
410                         cipher_xform->algo, internals)) {
411                         PMD_DRV_LOG(ERR, "%s not supported on this device",
412                                 rte_crypto_cipher_algorithm_strings
413                                         [cipher_xform->algo]);
414                         ret = -ENOTSUP;
415                         goto error_out;
416                 }
417                 if (qat_alg_validate_zuc_key(cipher_xform->key.length,
418                                 &session->qat_cipher_alg) != 0) {
419                         PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
420                         ret = -EINVAL;
421                         goto error_out;
422                 }
423                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
424                 break;
425         case RTE_CRYPTO_CIPHER_3DES_ECB:
426         case RTE_CRYPTO_CIPHER_AES_ECB:
427         case RTE_CRYPTO_CIPHER_AES_F8:
428         case RTE_CRYPTO_CIPHER_AES_XTS:
429         case RTE_CRYPTO_CIPHER_ARC4:
430                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
431                                 cipher_xform->algo);
432                 ret = -ENOTSUP;
433                 goto error_out;
434         default:
435                 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
436                                 cipher_xform->algo);
437                 ret = -EINVAL;
438                 goto error_out;
439         }
440
441         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
442                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
443         else
444                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
445
446         if (qat_alg_aead_session_create_content_desc_cipher(session,
447                                                 cipher_xform->key.data,
448                                                 cipher_xform->key.length)) {
449                 ret = -EINVAL;
450                 goto error_out;
451         }
452
453         return 0;
454
455 error_out:
456         if (session->bpi_ctx) {
457                 bpi_cipher_ctx_free(session->bpi_ctx);
458                 session->bpi_ctx = NULL;
459         }
460         return ret;
461 }
462
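/** Allocate session private data from the mempool, configure it from the
 *  xform chain and attach it to the generic cryptodev session under this
 *  driver's id; the object is returned to the mempool on failure.
 */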
463 int
464 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
465                 struct rte_crypto_sym_xform *xform,
466                 struct rte_cryptodev_sym_session *sess,
467                 struct rte_mempool *mempool)
468 {
469         void *sess_private_data;
470         int ret;
471
472         if (rte_mempool_get(mempool, &sess_private_data)) {
473                 CDEV_LOG_ERR(
474                         "Couldn't get object from session mempool");
475                 return -ENOMEM;
476         }
477
478         ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
479         if (ret != 0) {
480                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
481                                 "session parameters");
482
483                 /* Return session to mempool */
484                 rte_mempool_put(mempool, sess_private_data);
485                 return ret;
486         }
487
488         set_session_private_data(sess, dev->driver_id,
489                 sess_private_data);
490
491         return 0;
492 }
493
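/** Fill in a qat_session from the xform chain: set the content descriptor
 *  physical address, derive the firmware command id and dispatch to the
 *  cipher/auth/AEAD configuration helpers in the required order.
 */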
494 int
495 qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
496                 struct rte_crypto_sym_xform *xform, void *session_private)
497 {
498         struct qat_session *session = session_private;
499         int ret;
500
501         int qat_cmd_id;
502         PMD_INIT_FUNC_TRACE();
503
504         /* Set context descriptor physical address */
505         session->cd_paddr = rte_mempool_virt2iova(session) +
506                         offsetof(struct qat_session, cd);
507
508         session->min_qat_dev_gen = QAT_GEN1;
509
510         /* Get requested QAT command id */
511         qat_cmd_id = qat_get_cmd_id(xform);
512         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
513                 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
514                 return -ENOTSUP;
515         }
516         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
517         switch (session->qat_cmd) {
518         case ICP_QAT_FW_LA_CMD_CIPHER:
519                 ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
520                 if (ret < 0)
521                         return ret;
522                 break;
523         case ICP_QAT_FW_LA_CMD_AUTH:
524                 ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
525                 if (ret < 0)
526                         return ret;
527                 break;
528         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
529                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
530                         ret = qat_crypto_sym_configure_session_aead(xform,
531                                         session);
532                         if (ret < 0)
533                                 return ret;
534                 } else {
535                         ret = qat_crypto_sym_configure_session_cipher(dev,
536                                         xform, session);
537                         if (ret < 0)
538                                 return ret;
539                         ret = qat_crypto_sym_configure_session_auth(dev,
540                                         xform, session);
541                         if (ret < 0)
542                                 return ret;
543                 }
544                 break;
545         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
546                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
547                         ret = qat_crypto_sym_configure_session_aead(xform,
548                                         session);
549                         if (ret < 0)
550                                 return ret;
551                 } else {
552                         ret = qat_crypto_sym_configure_session_auth(dev,
553                                         xform, session);
554                         if (ret < 0)
555                                 return ret;
556                         ret = qat_crypto_sym_configure_session_cipher(dev,
557                                         xform, session);
558                         if (ret < 0)
559                                 return ret;
560                 }
561                 break;
562         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
563         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
564         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
565         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
566         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
567         case ICP_QAT_FW_LA_CMD_MGF1:
568         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
569         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
570         case ICP_QAT_FW_LA_CMD_DELIMITER:
571                 PMD_DRV_LOG(ERR, "Unsupported Service %u",
572                                 session->qat_cmd);
573                 return -ENOTSUP;
574         default:
575                 PMD_DRV_LOG(ERR, "Unsupported Service %u",
576                                 session->qat_cmd);
577                 return -ENOTSUP;
578         }
579
580         return 0;
581 }
582
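/** Parse the auth xform: select the QAT hash algorithm, record the auth IV
 *  layout and digest length, and build the auth part of the content
 *  descriptor (AES-GMAC is handled as a Galois cipher+hash special case).
 */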
583 int
584 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
585                                 struct rte_crypto_sym_xform *xform,
586                                 struct qat_session *session)
587 {
588         struct rte_crypto_auth_xform *auth_xform = NULL;
589         struct qat_pmd_private *internals = dev->data->dev_private;
590         auth_xform = qat_get_auth_xform(xform);
591         uint8_t *key_data = auth_xform->key.data;
592         uint8_t key_length = auth_xform->key.length;
593
594         switch (auth_xform->algo) {
595         case RTE_CRYPTO_AUTH_SHA1_HMAC:
596                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
597                 break;
598         case RTE_CRYPTO_AUTH_SHA224_HMAC:
599                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
600                 break;
601         case RTE_CRYPTO_AUTH_SHA256_HMAC:
602                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
603                 break;
604         case RTE_CRYPTO_AUTH_SHA384_HMAC:
605                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
606                 break;
607         case RTE_CRYPTO_AUTH_SHA512_HMAC:
608                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
609                 break;
610         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
611                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
612                 break;
613         case RTE_CRYPTO_AUTH_AES_GMAC:
614                 if (qat_alg_validate_aes_key(auth_xform->key.length,
615                                 &session->qat_cipher_alg) != 0) {
616                         PMD_DRV_LOG(ERR, "Invalid AES key size");
617                         return -EINVAL;
618                 }
619                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
620                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
621
622                 break;
623         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
624                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
625                 break;
626         case RTE_CRYPTO_AUTH_MD5_HMAC:
627                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
628                 break;
629         case RTE_CRYPTO_AUTH_NULL:
630                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
631                 break;
632         case RTE_CRYPTO_AUTH_KASUMI_F9:
633                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
634                 break;
635         case RTE_CRYPTO_AUTH_ZUC_EIA3:
636                 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
637                         PMD_DRV_LOG(ERR, "%s not supported on this device",
638                                 rte_crypto_auth_algorithm_strings
639                                 [auth_xform->algo]);
640                         return -ENOTSUP;
641                 }
642                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
643                 break;
644         case RTE_CRYPTO_AUTH_SHA1:
645         case RTE_CRYPTO_AUTH_SHA256:
646         case RTE_CRYPTO_AUTH_SHA512:
647         case RTE_CRYPTO_AUTH_SHA224:
648         case RTE_CRYPTO_AUTH_SHA384:
649         case RTE_CRYPTO_AUTH_MD5:
650         case RTE_CRYPTO_AUTH_AES_CMAC:
651         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
652                 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
653                                 auth_xform->algo);
654                 return -ENOTSUP;
655         default:
656                 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
657                                 auth_xform->algo);
658                 return -EINVAL;
659         }
660
661         session->auth_iv.offset = auth_xform->iv.offset;
662         session->auth_iv.length = auth_xform->iv.length;
663
664         if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
665                 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
666                         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
667                         session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
668                         /*
669                          * The cipher descriptor content must be created
670                          * first, then the authentication content
671                          */
672                         if (qat_alg_aead_session_create_content_desc_cipher(session,
673                                                 auth_xform->key.data,
674                                                 auth_xform->key.length))
675                                 return -EINVAL;
676
677                         if (qat_alg_aead_session_create_content_desc_auth(session,
678                                                 key_data,
679                                                 key_length,
680                                                 0,
681                                                 auth_xform->digest_length,
682                                                 auth_xform->op))
683                                 return -EINVAL;
684                 } else {
685                         session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
686                         session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
687                         /*
688                          * The authentication descriptor content must be
689                          * created first, then the cipher content
690                          */
691                         if (qat_alg_aead_session_create_content_desc_auth(session,
692                                         key_data,
693                                         key_length,
694                                         0,
695                                         auth_xform->digest_length,
696                                         auth_xform->op))
697                                 return -EINVAL;
698
699                         if (qat_alg_aead_session_create_content_desc_cipher(session,
700                                                 auth_xform->key.data,
701                                                 auth_xform->key.length))
702                                 return -EINVAL;
703                 }
704                 /* Restore to authentication only */
705                 session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
706         } else {
707                 if (qat_alg_aead_session_create_content_desc_auth(session,
708                                 key_data,
709                                 key_length,
710                                 0,
711                                 auth_xform->digest_length,
712                                 auth_xform->op))
713                         return -EINVAL;
714         }
715
716         session->digest_length = auth_xform->digest_length;
717         return 0;
718 }
719
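/** Parse the AEAD xform (AES-GCM or AES-CCM): reuse the cipher IV fields
 *  for the AEAD IV and build the cipher and auth content descriptors in the
 *  order required by the algorithm and operation direction.
 */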
720 int
721 qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
722                                 struct qat_session *session)
723 {
724         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
725         enum rte_crypto_auth_operation crypto_operation;
726
727         /*
728          * Store AEAD IV parameters as cipher IV,
729          * to avoid unnecessary memory usage
730          */
731         session->cipher_iv.offset = xform->aead.iv.offset;
732         session->cipher_iv.length = xform->aead.iv.length;
733
734         switch (aead_xform->algo) {
735         case RTE_CRYPTO_AEAD_AES_GCM:
736                 if (qat_alg_validate_aes_key(aead_xform->key.length,
737                                 &session->qat_cipher_alg) != 0) {
738                         PMD_DRV_LOG(ERR, "Invalid AES key size");
739                         return -EINVAL;
740                 }
741                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
742                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
743                 break;
744         case RTE_CRYPTO_AEAD_AES_CCM:
745                 if (qat_alg_validate_aes_key(aead_xform->key.length,
746                                 &session->qat_cipher_alg) != 0) {
747                         PMD_DRV_LOG(ERR, "Invalid AES key size");
748                         return -EINVAL;
749                 }
750                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
751                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
752                 break;
753         default:
754                 PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
755                                 aead_xform->algo);
756                 return -EINVAL;
757         }
758
759         if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
760                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
761                         (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
762                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
763                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
764                 /*
765                  * The cipher descriptor content must be created
766                  * first, then the authentication content
767                  */
768
769                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
770                         RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
771
772                 if (qat_alg_aead_session_create_content_desc_cipher(session,
773                                         aead_xform->key.data,
774                                         aead_xform->key.length))
775                         return -EINVAL;
776
777                 if (qat_alg_aead_session_create_content_desc_auth(session,
778                                         aead_xform->key.data,
779                                         aead_xform->key.length,
780                                         aead_xform->aad_length,
781                                         aead_xform->digest_length,
782                                         crypto_operation))
783                         return -EINVAL;
784         } else {
785                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
786                 /*
787                  * The authentication descriptor content must be
788                  * created first, then the cipher content
789                  */
790
791                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
792                         RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
793
794                 if (qat_alg_aead_session_create_content_desc_auth(session,
795                                         aead_xform->key.data,
796                                         aead_xform->key.length,
797                                         aead_xform->aad_length,
798                                         aead_xform->digest_length,
799                                         crypto_operation))
800                         return -EINVAL;
801
802                 if (qat_alg_aead_session_create_content_desc_cipher(session,
803                                         aead_xform->key.data,
804                                         aead_xform->key.length))
805                         return -EINVAL;
806         }
807
808         session->digest_length = aead_xform->digest_length;
809         return 0;
810 }
811
812 unsigned int qat_crypto_sym_get_session_private_size(
813                 struct rte_cryptodev *dev __rte_unused)
814 {
815         return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
816 }
817
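/** DOCSIS BPI pre-processing: for decryption, the trailing partial block is
 *  decrypted in software (CFB via ECB+XOR) before the request is sent to the
 *  device. Returns the length of the complete blocks left for hardware.
 */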
818 static inline uint32_t
819 qat_bpicipher_preprocess(struct qat_session *ctx,
820                                 struct rte_crypto_op *op)
821 {
822         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
823         struct rte_crypto_sym_op *sym_op = op->sym;
824         uint8_t last_block_len = block_len > 0 ?
825                         sym_op->cipher.data.length % block_len : 0;
826
827         if (last_block_len &&
828                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
829
830                 /* Decrypt last block */
831                 uint8_t *last_block, *dst, *iv;
832                 uint32_t last_block_offset = sym_op->cipher.data.offset +
833                                 sym_op->cipher.data.length - last_block_len;
834                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
835                                 uint8_t *, last_block_offset);
836
837                 if (unlikely(sym_op->m_dst != NULL))
838                         /* out-of-place operation (OOP) */
839                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
840                                                 uint8_t *, last_block_offset);
841                 else
842                         dst = last_block;
843
844                 if (last_block_len < sym_op->cipher.data.length)
845                         /* use previous block ciphertext as IV */
846                         iv = last_block - block_len;
847                 else
848                         /* runt block, i.e. less than one full block */
849                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
850                                         ctx->cipher_iv.offset);
851
852 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
853                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
854                         last_block_len);
855                 if (sym_op->m_dst != NULL)
856                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
857                                 last_block_len);
858 #endif
859                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
860                                 last_block_len, ctx->bpi_ctx);
861 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
862                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
863                         last_block_len);
864                 if (sym_op->m_dst != NULL)
865                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
866                                 last_block_len);
867 #endif
868         }
869
870         return sym_op->cipher.data.length - last_block_len;
871 }
872
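/** DOCSIS BPI post-processing: for encryption, the trailing partial block is
 *  encrypted in software after the device has processed the complete blocks.
 *  Returns the length that was handled by hardware.
 */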
873 static inline uint32_t
874 qat_bpicipher_postprocess(struct qat_session *ctx,
875                                 struct rte_crypto_op *op)
876 {
877         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
878         struct rte_crypto_sym_op *sym_op = op->sym;
879         uint8_t last_block_len = block_len > 0 ?
880                         sym_op->cipher.data.length % block_len : 0;
881
882         if (last_block_len > 0 &&
883                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
884
885                 /* Encrypt last block */
886                 uint8_t *last_block, *dst, *iv;
887                 uint32_t last_block_offset;
888
889                 last_block_offset = sym_op->cipher.data.offset +
890                                 sym_op->cipher.data.length - last_block_len;
891                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
892                                 uint8_t *, last_block_offset);
893
894                 if (unlikely(sym_op->m_dst != NULL))
895                         /* out-of-place operation (OOP) */
896                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
897                                                 uint8_t *, last_block_offset);
898                 else
899                         dst = last_block;
900
901                 if (last_block_len < sym_op->cipher.data.length)
902                         /* use previous block ciphertext as IV */
903                         iv = dst - block_len;
904                 else
905                         /* runt block, i.e. less than one full block */
906                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
907                                         ctx->cipher_iv.offset);
908
909 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
910                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
911                         last_block_len);
912                 if (sym_op->m_dst != NULL)
913                         rte_hexdump(stdout, "BPI: dst before post-process:",
914                                         dst, last_block_len);
915 #endif
916                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
917                                 last_block_len, ctx->bpi_ctx);
918 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
919                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
920                         last_block_len);
921                 if (sym_op->m_dst != NULL)
922                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
923                                 last_block_len);
924 #endif
925         }
926         return sym_op->cipher.data.length - last_block_len;
927 }
928
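/* Write the software tail to the ring-tail CSR and reset the pending-request
 * counter; callers coalesce these MMIO writes using the tail-write thresholds.
 */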
929 static inline void
930 txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
931         WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
932                         q->hw_queue_number, q->tail);
933         q->nb_pending_requests = 0;
934         q->csr_tail = q->tail;
935 }
936
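/** Enqueue a burst of crypto ops onto the TX ring. The in-flight counter is
 *  reserved up front and trimmed back on overflow or build failure; the tail
 *  CSR is written at most once per burst, subject to the write thresholds.
 */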
937 uint16_t
938 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
939                 uint16_t nb_ops)
940 {
941         register struct qat_queue *queue;
942         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
943         register uint32_t nb_ops_sent = 0;
944         register struct rte_crypto_op **cur_op = ops;
945         register int ret;
946         uint16_t nb_ops_possible = nb_ops;
947         register uint8_t *base_addr;
948         register uint32_t tail;
949         int overflow;
950
951         if (unlikely(nb_ops == 0))
952                 return 0;
953
954         /* read parameters used heavily in the main loop into registers */
955         queue = &(tmp_qp->tx_q);
956         base_addr = (uint8_t *)queue->base_addr;
957         tail = queue->tail;
958
959         /* Find how many can actually fit on the ring */
960         tmp_qp->inflights16 += nb_ops;
961         overflow = tmp_qp->inflights16 - queue->max_inflights;
962         if (overflow > 0) {
963                 tmp_qp->inflights16 -= overflow;
964                 nb_ops_possible = nb_ops - overflow;
965                 if (nb_ops_possible == 0)
966                         return 0;
967         }
968
969         while (nb_ops_sent != nb_ops_possible) {
970                 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
971                         tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
972                 if (ret != 0) {
973                         tmp_qp->stats.enqueue_err_count++;
974                         /*
975                          * This message cannot be enqueued, so subtract
976                          * the ops that were not sent from the in-flight count
977                          */
978                         tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
979                         if (nb_ops_sent == 0)
980                                 return 0;
981                         goto kick_tail;
982                 }
983
984                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
985                 nb_ops_sent++;
986                 cur_op++;
987         }
988 kick_tail:
989         queue->tail = tail;
990         tmp_qp->stats.enqueued_count += nb_ops_sent;
991         queue->nb_pending_requests += nb_ops_sent;
992         if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
993                         queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
994                 txq_write_tail(tmp_qp, queue);
995         }
996         return nb_ops_sent;
997 }
998
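/* Mark the response descriptors processed since the last CSR update as empty
 * (handling ring wrap-around) and advance the ring-head CSR to the new head.
 */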
999 static inline
1000 void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
1001 {
1002         uint32_t old_head, new_head;
1003         uint32_t max_head;
1004
1005         old_head = q->csr_head;
1006         new_head = q->head;
1007         max_head = qp->nb_descriptors * q->msg_size;
1008
1009         /* write out free descriptors */
1010         void *cur_desc = (uint8_t *)q->base_addr + old_head;
1011
1012         if (new_head < old_head) {
1013                 memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
1014                 memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
1015         } else {
1016                 memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
1017         }
1018         q->nb_processed_responses = 0;
1019         q->csr_head = new_head;
1020
1021         /* write current head to CSR */
1022         WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
1023                             q->hw_queue_number, new_head);
1024 }
1025
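/** Dequeue completed ops from the RX ring: translate each firmware response
 *  status onto the op, run BPI post-processing where needed, then lazily
 *  update the RX head CSR and any outstanding TX tail write.
 */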
1026 uint16_t
1027 qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
1028                 uint16_t nb_ops)
1029 {
1030         struct qat_queue *rx_queue, *tx_queue;
1031         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
1032         uint32_t msg_counter = 0;
1033         struct rte_crypto_op *rx_op;
1034         struct icp_qat_fw_comn_resp *resp_msg;
1035         uint32_t head;
1036
1037         rx_queue = &(tmp_qp->rx_q);
1038         tx_queue = &(tmp_qp->tx_q);
1039         head = rx_queue->head;
1040         resp_msg = (struct icp_qat_fw_comn_resp *)
1041                         ((uint8_t *)rx_queue->base_addr + head);
1042
1043         while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
1044                         msg_counter != nb_ops) {
1045                 rx_op = (struct rte_crypto_op *)(uintptr_t)
1046                                 (resp_msg->opaque_data);
1047
1048 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
1049                 rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
1050                         sizeof(struct icp_qat_fw_comn_resp));
1051 #endif
1052                 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
1053                                 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
1054                                         resp_msg->comn_hdr.comn_status)) {
1055                         rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1056                 } else {
1057                         struct qat_session *sess = (struct qat_session *)
1058                                         get_session_private_data(
1059                                         rx_op->sym->session,
1060                                         cryptodev_qat_driver_id);
1061
1062                         if (sess->bpi_ctx)
1063                                 qat_bpicipher_postprocess(sess, rx_op);
1064                         rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1065                 }
1066
1067                 head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
1068                 resp_msg = (struct icp_qat_fw_comn_resp *)
1069                                 ((uint8_t *)rx_queue->base_addr + head);
1070                 *ops = rx_op;
1071                 ops++;
1072                 msg_counter++;
1073         }
1074         if (msg_counter > 0) {
1075                 rx_queue->head = head;
1076                 tmp_qp->stats.dequeued_count += msg_counter;
1077                 rx_queue->nb_processed_responses += msg_counter;
1078                 tmp_qp->inflights16 -= msg_counter;
1079
1080                 if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
1081                         rxq_free_desc(tmp_qp, rx_queue);
1082         }
1083         /* also check if tail needs to be advanced */
1084         if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
1085                         tx_queue->tail != tx_queue->csr_tail) {
1086                 txq_write_tail(tmp_qp, tx_queue);
1087         }
1088         return msg_counter;
1089 }
1090
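/* Flatten a (possibly chained) mbuf into the QAT scatter-gather buffer list,
 * starting at buff_start and trimming the final entry so the total covers
 * exactly data_len; fails if more than QAT_SGL_MAX_NUMBER entries are needed.
 */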
1091 static inline int
1092 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
1093                 struct qat_alg_buf_list *list, uint32_t data_len)
1094 {
1095         int nr = 1;
1096
1097         uint32_t buf_len = rte_pktmbuf_iova(buf) -
1098                         buff_start + rte_pktmbuf_data_len(buf);
1099
1100         list->bufers[0].addr = buff_start;
1101         list->bufers[0].resrvd = 0;
1102         list->bufers[0].len = buf_len;
1103
1104         if (data_len <= buf_len) {
1105                 list->num_bufs = nr;
1106                 list->bufers[0].len = data_len;
1107                 return 0;
1108         }
1109
1110         buf = buf->next;
1111         while (buf) {
1112                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
1113                         PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
1114                                         " entry(%u)",
1115                                         QAT_SGL_MAX_NUMBER);
1116                         return -EINVAL;
1117                 }
1118
1119                 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
1120                 list->bufers[nr].resrvd = 0;
1121                 list->bufers[nr].addr = rte_pktmbuf_iova(buf);
1122
1123                 buf_len += list->bufers[nr].len;
1124                 buf = buf->next;
1125
1126                 if (buf_len > data_len) {
1127                         list->bufers[nr].len -=
1128                                 buf_len - data_len;
1129                         buf = NULL;
1130                 }
1131                 ++nr;
1132         }
1133         list->num_bufs = nr;
1134
1135         return 0;
1136 }
1137
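/* Place the cipher IV in the request: inline in the descriptor when it fits
 * the cipher_IV_array, otherwise pass it by physical address via the
 * 64-bit IV pointer flag.
 */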
1138 static inline void
1139 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
1140                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1141                 struct rte_crypto_op *op,
1142                 struct icp_qat_fw_la_bulk_req *qat_req)
1143 {
1144         /* copy IV into request if it fits */
1145         if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
1146                 rte_memcpy(cipher_param->u.cipher_IV_array,
1147                                 rte_crypto_op_ctod_offset(op, uint8_t *,
1148                                         iv_offset),
1149                                 iv_length);
1150         } else {
1151                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
1152                                 qat_req->comn_hdr.serv_specif_flags,
1153                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
1154                 cipher_param->u.s.cipher_IV_ptr =
1155                                 rte_crypto_op_ctophys_offset(op,
1156                                         iv_offset);
1157         }
1158 }
1159
1160 /** Setting the IV for CCM is a special case: byte 0 is set to q-1,
1161  *  where q is the number of bytes used to pad the nonce to a 16-byte block
1162  */
1163 static inline void
1164 set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
1165                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1166                 struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
1167 {
1168         rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
1169                         ICP_QAT_HW_CCM_NONCE_OFFSET,
1170                         rte_crypto_op_ctod_offset(op, uint8_t *,
1171                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1172                         iv_length);
1173         *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
1174                         q - ICP_QAT_HW_CCM_NONCE_OFFSET;
1175
1176         if (aad_len_field_sz)
1177                 rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
1178                         rte_crypto_op_ctod_offset(op, uint8_t *,
1179                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1180                         iv_length);
1181 }
1182
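/* Build one firmware request in out_msg for the given crypto op: start from
 * the session's request template (fw_req), then fill in cipher/auth/AEAD
 * offsets, lengths, IVs and digest/AAD pointers according to the session.
 */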
1183 static inline int
1184 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
1185                 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
1186 {
1187         int ret = 0;
1188         struct qat_session *ctx;
1189         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1190         struct icp_qat_fw_la_auth_req_params *auth_param;
1191         register struct icp_qat_fw_la_bulk_req *qat_req;
1192         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
1193         uint32_t cipher_len = 0, cipher_ofs = 0;
1194         uint32_t auth_len = 0, auth_ofs = 0;
1195         uint32_t min_ofs = 0;
1196         uint64_t src_buf_start = 0, dst_buf_start = 0;
1197         uint8_t do_sgl = 0;
1198
1199 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1200         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
1201                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
1202                                 "operation requests, op (%p) is not a "
1203                                 "symmetric operation.", op);
1204                 return -EINVAL;
1205         }
1206 #endif
1207         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
1208                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
1209                                 " requests, op (%p) is sessionless.", op);
1210                 return -EINVAL;
1211         }
1212
1213         ctx = (struct qat_session *)get_session_private_data(
1214                         op->sym->session, cryptodev_qat_driver_id);
1215
1216         if (unlikely(ctx == NULL)) {
1217                 PMD_DRV_LOG(ERR, "Session was not created for this device");
1218                 return -EINVAL;
1219         }
1220
1221         if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
1222                 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
1223                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1224                 return -EINVAL;
1225         }
1226
1227
1228
1229         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
1230         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
1231         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
1232         cipher_param = (void *)&qat_req->serv_specif_rqpars;
1233         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
1234
1235         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
1236                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1237                 /* AES-GCM or AES-CCM */
1238                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1239                                 ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
1240                                 (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
1241                                 && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
1242                                 && ctx->qat_hash_alg ==
1243                                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
1244                         do_aead = 1;
1245                 } else {
1246                         do_auth = 1;
1247                         do_cipher = 1;
1248                 }
1249         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1250                 do_auth = 1;
1251                 do_cipher = 0;
1252         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1253                 do_auth = 0;
1254                 do_cipher = 1;
1255         }
1256
1257         if (do_cipher) {
1258
1259                 if (ctx->qat_cipher_alg ==
1260                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
1261                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
1262                         ctx->qat_cipher_alg ==
1263                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1264
1265                         if (unlikely(
1266                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
1267                                  || (cipher_param->cipher_offset
1268                                                         % BYTE_LENGTH != 0))) {
1269                                 PMD_DRV_LOG(ERR,
1270                   "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
1271                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1272                                 return -EINVAL;
1273                         }
1274                         cipher_len = op->sym->cipher.data.length >> 3;
1275                         cipher_ofs = op->sym->cipher.data.offset >> 3;
1276
1277                 } else if (ctx->bpi_ctx) {
1278                         /* DOCSIS - only send complete blocks to the device.
1279                          * Process any partial block using CFB mode.
1280                          * Even with 0 complete blocks, still send to the device
1281                          * so it reaches the rx queue for post-processing and dequeuing
1282                          */
1283                         cipher_len = qat_bpicipher_preprocess(ctx, op);
1284                         cipher_ofs = op->sym->cipher.data.offset;
1285                 } else {
1286                         cipher_len = op->sym->cipher.data.length;
1287                         cipher_ofs = op->sym->cipher.data.offset;
1288                 }
1289
1290                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1291                                 cipher_param, op, qat_req);
1292                 min_ofs = cipher_ofs;
1293         }
1294
1295         if (do_auth) {
1296
1297                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
1298                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
1299                         ctx->qat_hash_alg ==
1300                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
1301                         if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0)
1302                                 || (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
1303                                 PMD_DRV_LOG(ERR,
1304                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1305                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1306                                 return -EINVAL;
1307                         }
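                             /* Bit offsets/lengths converted to bytes, as for
                              * the cipher path above.
                              */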
1308                         auth_ofs = op->sym->auth.data.offset >> 3;
1309                         auth_len = op->sym->auth.data.length >> 3;
1310
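                             /* The auth IV physical address is handed to the
                              * device through the AAD pointer field.
                              */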
1311                         auth_param->u1.aad_adr =
1312                                         rte_crypto_op_ctophys_offset(op,
1313                                                         ctx->auth_iv.offset);
1314
1315                 } else if (ctx->qat_hash_alg ==
1316                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1317                                 ctx->qat_hash_alg ==
1318                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1319                         /* AES-GMAC */
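                             /* The auth IV is programmed via the cipher IV
                              * fields; no separate AAD pointer or length is set.
                              */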
1320                         set_cipher_iv(ctx->auth_iv.length,
1321                                 ctx->auth_iv.offset,
1322                                 cipher_param, op, qat_req);
1323                         auth_ofs = op->sym->auth.data.offset;
1324                         auth_len = op->sym->auth.data.length;
1325
1326                         auth_param->u1.aad_adr = 0;
1327                         auth_param->u2.aad_sz = 0;
1328
1329                         /*
1330                          * If len(iv)==12B fw computes J0
1331                          */
1332                         if (ctx->auth_iv.length == 12) {
1333                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1334                                         qat_req->comn_hdr.serv_specif_flags,
1335                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1336
1337                         }
1338                 } else {
1339                         auth_ofs = op->sym->auth.data.offset;
1340                         auth_len = op->sym->auth.data.length;
1341
1342                 }
1343                 min_ofs = auth_ofs;
1344
1345                 if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
1346                         auth_param->auth_res_addr =
1347                                         op->sym->auth.digest.phys_addr;
1348
1349         }
1350
1351         if (do_aead) {
1352                 /*
1353                  * This address may be used to set the AAD physical pointer
1354                  * to the IV offset within the op (CCM with no real AAD).
1355                  */
1356                 rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
1357                 if (ctx->qat_hash_alg ==
1358                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1359                                 ctx->qat_hash_alg ==
1360                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1361                         /*
1362                          * If len(iv)==12B fw computes J0
1363                          */
1364                         if (ctx->cipher_iv.length == 12) {
1365                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1366                                         qat_req->comn_hdr.serv_specif_flags,
1367                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1368                         }
1369
1370                         set_cipher_iv(ctx->cipher_iv.length,
1371                                         ctx->cipher_iv.offset,
1372                                         cipher_param, op, qat_req);
1373
1374                 } else if (ctx->qat_hash_alg ==
1375                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
1376
1377                         /* In case of AES-CCM this may point to user-selected
1378                          * memory or to the IV offset in the crypto op.
1379                          */
1380                         uint8_t *aad_data = op->sym->aead.aad.data;
1381                         /* This is the true AAD length; it does not include the
1382                          * 18 bytes of preceding data (B0 block + length field).
1383                          */
1384                         uint8_t aad_ccm_real_len = 0;
1385
1386                         uint8_t aad_len_field_sz = 0;
1387                         uint32_t msg_len_be =
1388                                         rte_bswap32(op->sym->aead.data.length);
1389
1390                         if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
1391                                 aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
1392                                 aad_ccm_real_len = ctx->aad_len -
1393                                         ICP_QAT_HW_CCM_AAD_B0_LEN -
1394                                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
1395                         } else {
1396                                 /*
1397                                  * aad_len is not greater than 18, so there is no real
1398                                  * AAD data; use the IV area after the op for B0 instead.
1399                                  */
1400                                 aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
1401                                                 ctx->cipher_iv.offset);
1402                                 aad_phys_addr_aead =
1403                                                 rte_crypto_op_ctophys_offset(op,
1404                                                                 ctx->cipher_iv.offset);
1405                         }
1406
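                             /* q is the size in bytes of the message-length
                              * field in the CCM B0 block: nonce length plus q
                              * always equals ICP_QAT_HW_CCM_NQ_CONST.
                              */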
1407                         uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
1408
1409                         aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
1410                                                         ctx->digest_length, q);
1411
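                             /* Copy the big-endian message length into the
                              * trailing bytes of the q-byte length field of B0.
                              */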
1412                         if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
1413                                 memcpy(aad_data + ctx->cipher_iv.length +
1414                                         ICP_QAT_HW_CCM_NONCE_OFFSET
1415                                         + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
1416                                         (uint8_t *)&msg_len_be,
1417                                         ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
1418                         } else {
1419                                 memcpy(aad_data + ctx->cipher_iv.length +
1420                                         ICP_QAT_HW_CCM_NONCE_OFFSET,
1421                                         (uint8_t *)&msg_len_be
1422                                         + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
1423                                         - q), q);
1424                         }
1425
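                             /* When real AAD is present, store its length
                              * big-endian immediately after B0 and zero-pad the
                              * AAD region to an ICP_QAT_HW_CCM_AAD_B0_LEN boundary.
                              */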
1426                         if (aad_len_field_sz > 0) {
1427                                 *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
1428                                                 = rte_bswap16(aad_ccm_real_len);
1429
1430                                 if ((aad_ccm_real_len + aad_len_field_sz)
1431                                                 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
1432                                         uint8_t pad_len = 0;
1433                                         uint8_t pad_idx = 0;
1434
1435                                         pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
1436                                                 ((aad_ccm_real_len + aad_len_field_sz) %
1437                                                         ICP_QAT_HW_CCM_AAD_B0_LEN);
1438                                         pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
1439                                                 aad_ccm_real_len + aad_len_field_sz;
1440                                         memset(&aad_data[pad_idx],
1441                                                         0, pad_len);
1442                                 }
1443
1444                         }
1445
1446                         set_cipher_iv_ccm(ctx->cipher_iv.length,
1447                                         ctx->cipher_iv.offset,
1448                                         cipher_param, op, q,
1449                                         aad_len_field_sz);
1450
1451                 }
1452
1453                 cipher_len = op->sym->aead.data.length;
1454                 cipher_ofs = op->sym->aead.data.offset;
1455                 auth_len = op->sym->aead.data.length;
1456                 auth_ofs = op->sym->aead.data.offset;
1457
1458                 auth_param->u1.aad_adr = aad_phys_addr_aead;
1459                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
1460                 min_ofs = op->sym->aead.data.offset;
1461         }
1462
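             /* A chained source or destination mbuf requires the
              * scatter-gather (SGL) request format.
              */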
1463         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1464                 do_sgl = 1;
1465
1466         /* adjust for chain case */
1467         if (do_cipher && do_auth)
1468                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1469
1470         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1471                 min_ofs = 0;
1472
1473         if (unlikely(op->sym->m_dst != NULL)) {
1474                 /* Out-of-place operation (OOP)
1475                  * Don't align DMA start. DMA the minimum data-set
1476                  * so as not to overwrite data in dest buffer
1477                  */
1478                 src_buf_start =
1479                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
1480                 dst_buf_start =
1481                         rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
1482
1483         } else {
1484                 /* In-place operation
1485                  * Start DMA at nearest aligned address below min_ofs
1486                  */
1487                 src_buf_start =
1488                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
1489                                                 & QAT_64_BTYE_ALIGN_MASK;
1490
1491                 if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
1492                                         rte_pktmbuf_headroom(op->sym->m_src))
1493                                                         > src_buf_start)) {
1494                         /* alignment has pushed the address before the start of
1495                          * the mbuf, so revert and take the performance hit
1496                          */
1497                         src_buf_start =
1498                                 rte_pktmbuf_iova_offset(op->sym->m_src,
1499                                                                 min_ofs);
1500                 }
1501                 dst_buf_start = src_buf_start;
1502         }
1503
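             /* Cipher/auth offsets in the request are relative to the DMA
              * start address (src_buf_start) chosen above.
              */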
1504         if (do_cipher || do_aead) {
1505                 cipher_param->cipher_offset =
1506                                 (uint32_t)rte_pktmbuf_iova_offset(
1507                                 op->sym->m_src, cipher_ofs) - src_buf_start;
1508                 cipher_param->cipher_length = cipher_len;
1509         } else {
1510                 cipher_param->cipher_offset = 0;
1511                 cipher_param->cipher_length = 0;
1512         }
1513
1514         if (do_auth || do_aead) {
1515                 auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
1516                                 op->sym->m_src, auth_ofs) - src_buf_start;
1517                 auth_param->auth_len = auth_len;
1518         } else {
1519                 auth_param->auth_off = 0;
1520                 auth_param->auth_len = 0;
1521         }
1522
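             /* The DMA length must reach the end of whichever of the cipher
              * or auth regions extends furthest.
              */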
1523         qat_req->comn_mid.dst_length =
1524                 qat_req->comn_mid.src_length =
1525                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1526                 > (auth_param->auth_off + auth_param->auth_len) ?
1527                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1528                 : (auth_param->auth_off + auth_param->auth_len);
1529
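             /* SGL case: flatten the mbuf chains into the per-op cookie's
              * SGL arrays and point the request at those.
              */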
1530         if (do_sgl) {
1531
1532                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1533                                 QAT_COMN_PTR_TYPE_SGL);
1534                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1535                                 &qat_op_cookie->qat_sgl_list_src,
1536                                 qat_req->comn_mid.src_length);
1537                 if (ret) {
1538                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1539                         return ret;
1540                 }
1541
1542                 if (likely(op->sym->m_dst == NULL))
1543                         qat_req->comn_mid.dest_data_addr =
1544                                 qat_req->comn_mid.src_data_addr =
1545                                 qat_op_cookie->qat_sgl_src_phys_addr;
1546                 else {
1547                         ret = qat_sgl_fill_array(op->sym->m_dst,
1548                                         dst_buf_start,
1549                                         &qat_op_cookie->qat_sgl_list_dst,
1550                                                 qat_req->comn_mid.dst_length);
1551
1552                         if (ret) {
1553                                 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
1554                                                 "fill sgl array");
1555                                 return ret;
1556                         }
1557
1558                         qat_req->comn_mid.src_data_addr =
1559                                 qat_op_cookie->qat_sgl_src_phys_addr;
1560                         qat_req->comn_mid.dest_data_addr =
1561                                         qat_op_cookie->qat_sgl_dst_phys_addr;
1562                 }
1563         } else {
1564                 qat_req->comn_mid.src_data_addr = src_buf_start;
1565                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1566         }
1567
1568 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1569         rte_hexdump(stdout, "qat_req:", qat_req,
1570                         sizeof(struct icp_qat_fw_la_bulk_req));
1571         rte_hexdump(stdout, "src_data:",
1572                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1573                         rte_pktmbuf_data_len(op->sym->m_src));
1574         if (do_cipher) {
1575                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
1576                                                 uint8_t *,
1577                                                 ctx->cipher_iv.offset);
1578                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
1579                                 ctx->cipher_iv.length);
1580         }
1581
1582         if (do_auth) {
1583                 if (ctx->auth_iv.length) {
1584                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
1585                                                         uint8_t *,
1586                                                         ctx->auth_iv.offset);
1587                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
1588                                                 ctx->auth_iv.length);
1589                 }
1590                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1591                                 ctx->digest_length);
1592         }
1593
1594         if (do_aead) {
1595                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
1596                                 ctx->digest_length);
1597                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
1598                                 ctx->aad_len);
1599         }
1600 #endif
1601         return 0;
1602 }
1603
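     /* Returns data % (1 << shift): a cheap modulo for the power-of-two
      * ring sizes used when wrapping queue offsets.
      */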
1604 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1605 {
1606         uint32_t div = data >> shift;
1607         uint32_t mult = div << shift;
1608
1609         return data - mult;
1610 }
1611
1612 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1613                 __rte_unused struct rte_cryptodev_config *config)
1614 {
1615         PMD_INIT_FUNC_TRACE();
1616         return 0;
1617 }
1618
1619 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1620 {
1621         PMD_INIT_FUNC_TRACE();
1622         return 0;
1623 }
1624
1625 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1626 {
1627         PMD_INIT_FUNC_TRACE();
1628 }
1629
1630 int qat_dev_close(struct rte_cryptodev *dev)
1631 {
1632         int i, ret;
1633
1634         PMD_INIT_FUNC_TRACE();
1635
1636         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1637                 ret = qat_crypto_sym_qp_release(dev, i);
1638                 if (ret < 0)
1639                         return ret;
1640         }
1641
1642         return 0;
1643 }
1644
1645 void qat_dev_info_get(struct rte_cryptodev *dev,
1646                         struct rte_cryptodev_info *info)
1647 {
1648         struct qat_pmd_private *internals = dev->data->dev_private;
1649
1650         PMD_INIT_FUNC_TRACE();
1651         if (info != NULL) {
1652                 info->max_nb_queue_pairs =
1653                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
1654                                 ADF_NUM_BUNDLES_PER_DEV;
1655                 info->feature_flags = dev->feature_flags;
1656                 info->capabilities = internals->qat_dev_capabilities;
1657                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1658                 info->driver_id = cryptodev_qat_driver_id;
1659                 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1660         }
1661 }
1662
1663 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1664                 struct rte_cryptodev_stats *stats)
1665 {
1666         int i;
1667         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1668
1669         PMD_INIT_FUNC_TRACE();
1670         if (stats == NULL) {
1671                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1672                 return;
1673         }
1674         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1675                 if (qp[i] == NULL) {
1676                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1677                         continue;
1678                 }
1679
1680                 stats->enqueued_count += qp[i]->stats.enqueued_count;
1681                 stats->dequeued_count += qp[i]->stats.dequeued_count;
1682                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1683                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1684         }
1685 }
1686
1687 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1688 {
1689         int i;
1690         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1691
1692         PMD_INIT_FUNC_TRACE();
1693         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1694                 if (qp[i] == NULL) {
                        PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
                        continue;
                }
                memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
        }
1695         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1696 }