f94a1b03d1c3206a57e6b50caa932fc27035ebde
[dpdk.git] / drivers/crypto/qat/qat_crypto.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *       * Redistributions of source code must retain the above copyright
12  *         notice, this list of conditions and the following disclaimer.
13  *       * Redistributions in binary form must reproduce the above copyright
14  *         notice, this list of conditions and the following disclaimer in
15  *         the documentation and/or other materials provided with the
16  *         distribution.
17  *       * Neither the name of Intel Corporation nor the names of its
18  *         contributors may be used to endorse or promote products derived
19  *         from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <string.h>
38 #include <inttypes.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_tailq.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_launch.h>
52 #include <rte_eal.h>
53 #include <rte_per_lcore.h>
54 #include <rte_lcore.h>
55 #include <rte_atomic.h>
56 #include <rte_branch_prediction.h>
57 #include <rte_mempool.h>
58 #include <rte_mbuf.h>
59 #include <rte_string_fns.h>
60 #include <rte_spinlock.h>
61 #include <rte_hexdump.h>
62 #include <rte_crypto_sym.h>
63 #include <rte_cryptodev_pci.h>
64 #include <openssl/evp.h>
65
66 #include "qat_logs.h"
67 #include "qat_algs.h"
68 #include "qat_crypto.h"
69 #include "adf_transport_access_macros.h"
70
71 #define BYTE_LENGTH    8
72
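/* Capability lookup helpers: scan the device's capability table
 * (internals->qat_dev_capabilities) and report whether a given symmetric
 * cipher or auth algorithm is supported, so algorithms that only exist on
 * some devices (e.g. ZUC) can be rejected at session setup.
 */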
73 static int
74 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
75                 struct qat_pmd_private *internals) {
76         int i = 0;
77         const struct rte_cryptodev_capabilities *capability;
78
79         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
80                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
81                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
82                         continue;
83
84                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
85                         continue;
86
87                 if (capability->sym.cipher.algo == algo)
88                         return 1;
89         }
90         return 0;
91 }
92
93 static int
94 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
95                 struct qat_pmd_private *internals) {
96         int i = 0;
97         const struct rte_cryptodev_capabilities *capability;
98
99         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
100                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
101                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
102                         continue;
103
104                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
105                         continue;
106
107                 if (capability->sym.auth.algo == algo)
108                         return 1;
109         }
110         return 0;
111 }
112
113 /** Encrypt a single partial block
114  *  Depends on openssl libcrypto
115  *  Uses ECB+XOR to do CFB encryption, same result, more performant
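 *  (For one block, CFB is C = P XOR E_k(IV); a single ECB encryption of the
 *  IV followed by an XOR therefore gives the same result without chaining.)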
116  */
117 static inline int
118 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
119                 uint8_t *iv, int ivlen, int srclen,
120                 void *bpi_ctx)
121 {
122         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
123         int encrypted_ivlen;
124         uint8_t encrypted_iv[16];
125         int i;
126
127         /* ECB method: encrypt the IV, then XOR this with plaintext */
128         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
129                                                                 <= 0)
130                 goto cipher_encrypt_err;
131
132         for (i = 0; i < srclen; i++)
133                 *(dst+i) = *(src+i)^(encrypted_iv[i]);
134
135         return 0;
136
137 cipher_encrypt_err:
138         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
139         return -EINVAL;
140 }
141
142 /** Decrypt a single partial block
143  *  Depends on openssl libcrypto
144  *  Uses ECB+XOR to do CFB decryption, same result, more performant
145  */
146 static inline int
147 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
148                 uint8_t *iv, int ivlen, int srclen,
149                 void *bpi_ctx)
150 {
151         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
152         int encrypted_ivlen;
153         uint8_t encrypted_iv[16];
154         int i;
155
156         /* ECB method: encrypt (not decrypt!) the IV, then XOR with the ciphertext */
157         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
158                                                                 <= 0)
159                 goto cipher_decrypt_err;
160
161         for (i = 0; i < srclen; i++)
162                 *(dst+i) = *(src+i)^(encrypted_iv[i]);
163
164         return 0;
165
166 cipher_decrypt_err:
167         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
168         return -EINVAL;
169 }
170
171 /** Creates an AES or DES context in ECB mode
172  *  Depends on openssl libcrypto
173  */
174 static void *
175 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
176                 enum rte_crypto_cipher_operation direction __rte_unused,
177                                         uint8_t *key)
178 {
179         const EVP_CIPHER *algo = NULL;
180         EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
181
182         if (ctx == NULL)
183                 goto ctx_init_err;
184
185         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
186                 algo = EVP_des_ecb();
187         else
188                 algo = EVP_aes_128_ecb();
189
190         /* IV will be ECB encrypted whether direction is encrypt or decrypt */
191         if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
192                 goto ctx_init_err;
193
194         return ctx;
195
196 ctx_init_err:
197         if (ctx != NULL)
198                 EVP_CIPHER_CTX_free(ctx);
199         return NULL;
200 }
201
202 /** Frees a context previously created
203  *  Depends on openssl libcrypto
204  */
205 static void
206 bpi_cipher_ctx_free(void *bpi_ctx)
207 {
208         if (bpi_ctx != NULL)
209                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
210 }
211
212 static inline uint32_t
213 adf_modulo(uint32_t data, uint32_t shift);
214
215 static inline int
216 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
217                 struct qat_crypto_op_cookie *qat_op_cookie);
218
219 void
220 qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
221                 struct rte_cryptodev_sym_session *sess)
222 {
223         PMD_INIT_FUNC_TRACE();
224         uint8_t index = dev->driver_id;
225         void *sess_priv = get_session_private_data(sess, index);
226         struct qat_session *s = (struct qat_session *)sess_priv;
227
228         if (sess_priv) {
229                 if (s->bpi_ctx)
230                         bpi_cipher_ctx_free(s->bpi_ctx);
231                 memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
232                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
233                 set_session_private_data(sess, index, NULL);
234                 rte_mempool_put(sess_mp, sess_priv);
235         }
236 }
237
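/* Map an xform chain onto a QAT firmware LA command id: cipher-only,
 * auth-only, AEAD (encrypt -> CIPHER_HASH, decrypt -> HASH_CIPHER) or a
 * two-element cipher/auth chain; returns -1 for unsupported chains.
 */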
238 static int
239 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
240 {
241         /* Cipher Only */
242         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
243                 return ICP_QAT_FW_LA_CMD_CIPHER;
244
245         /* Authentication Only */
246         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
247                 return ICP_QAT_FW_LA_CMD_AUTH;
248
249         /* AEAD */
250         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
251                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
252                         return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
253                 else
254                         return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
255         }
256
257         if (xform->next == NULL)
258                 return -1;
259
260         /* Cipher then Authenticate */
261         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
262                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
263                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
264
265         /* Authenticate then Cipher */
266         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
267                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
268                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
269
270         return -1;
271 }
272
273 static struct rte_crypto_auth_xform *
274 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
275 {
276         do {
277                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
278                         return &xform->auth;
279
280                 xform = xform->next;
281         } while (xform);
282
283         return NULL;
284 }
285
286 static struct rte_crypto_cipher_xform *
287 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
288 {
289         do {
290                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
291                         return &xform->cipher;
292
293                 xform = xform->next;
294         } while (xform);
295
296         return NULL;
297 }
298 void *
299 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
300                 struct rte_crypto_sym_xform *xform, void *session_private)
301 {
302         struct qat_session *session = session_private;
303         struct qat_pmd_private *internals = dev->data->dev_private;
304         struct rte_crypto_cipher_xform *cipher_xform = NULL;
305
306         /* Get cipher xform from crypto xform chain */
307         cipher_xform = qat_get_cipher_xform(xform);
308
309         session->cipher_iv.offset = cipher_xform->iv.offset;
310         session->cipher_iv.length = cipher_xform->iv.length;
311
312         switch (cipher_xform->algo) {
313         case RTE_CRYPTO_CIPHER_AES_CBC:
314                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
315                                 &session->qat_cipher_alg) != 0) {
316                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
317                         goto error_out;
318                 }
319                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
320                 break;
321         case RTE_CRYPTO_CIPHER_AES_CTR:
322                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
323                                 &session->qat_cipher_alg) != 0) {
324                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
325                         goto error_out;
326                 }
327                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
328                 break;
329         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
330                 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
331                                         &session->qat_cipher_alg) != 0) {
332                         PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
333                         goto error_out;
334                 }
335                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
336                 break;
337         case RTE_CRYPTO_CIPHER_NULL:
338                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
339                 break;
340         case RTE_CRYPTO_CIPHER_KASUMI_F8:
341                 if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
342                                         &session->qat_cipher_alg) != 0) {
343                         PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
344                         goto error_out;
345                 }
346                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
347                 break;
348         case RTE_CRYPTO_CIPHER_3DES_CBC:
349                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
350                                 &session->qat_cipher_alg) != 0) {
351                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
352                         goto error_out;
353                 }
354                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
355                 break;
356         case RTE_CRYPTO_CIPHER_DES_CBC:
357                 if (qat_alg_validate_des_key(cipher_xform->key.length,
358                                 &session->qat_cipher_alg) != 0) {
359                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
360                         goto error_out;
361                 }
362                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
363                 break;
364         case RTE_CRYPTO_CIPHER_3DES_CTR:
365                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
366                                 &session->qat_cipher_alg) != 0) {
367                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
368                         goto error_out;
369                 }
370                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
371                 break;
372         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
373                 session->bpi_ctx = bpi_cipher_ctx_init(
374                                         cipher_xform->algo,
375                                         cipher_xform->op,
376                                         cipher_xform->key.data);
377                 if (session->bpi_ctx == NULL) {
378                         PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
379                         goto error_out;
380                 }
381                 if (qat_alg_validate_des_key(cipher_xform->key.length,
382                                 &session->qat_cipher_alg) != 0) {
383                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
384                         goto error_out;
385                 }
386                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
387                 break;
388         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
389                 session->bpi_ctx = bpi_cipher_ctx_init(
390                                         cipher_xform->algo,
391                                         cipher_xform->op,
392                                         cipher_xform->key.data);
393                 if (session->bpi_ctx == NULL) {
394                         PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
395                         goto error_out;
396                 }
397                 if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
398                                 &session->qat_cipher_alg) != 0) {
399                         PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
400                         goto error_out;
401                 }
402                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
403                 break;
404         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
405                 if (!qat_is_cipher_alg_supported(
406                         cipher_xform->algo, internals)) {
407                         PMD_DRV_LOG(ERR, "%s not supported on this device",
408                                 rte_crypto_cipher_algorithm_strings
409                                         [cipher_xform->algo]);
410                         goto error_out;
411                 }
412                 if (qat_alg_validate_zuc_key(cipher_xform->key.length,
413                                 &session->qat_cipher_alg) != 0) {
414                         PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
415                         goto error_out;
416                 }
417                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
418                 break;
419         case RTE_CRYPTO_CIPHER_3DES_ECB:
420         case RTE_CRYPTO_CIPHER_AES_ECB:
421         case RTE_CRYPTO_CIPHER_AES_F8:
422         case RTE_CRYPTO_CIPHER_AES_XTS:
423         case RTE_CRYPTO_CIPHER_ARC4:
424                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
425                                 cipher_xform->algo);
426                 goto error_out;
427         default:
428                 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
429                                 cipher_xform->algo);
430                 goto error_out;
431         }
432
433         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
434                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
435         else
436                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
437
438         if (qat_alg_aead_session_create_content_desc_cipher(session,
439                                                 cipher_xform->key.data,
440                                                 cipher_xform->key.length))
441                 goto error_out;
442
443         return session;
444
445 error_out:
446         if (session->bpi_ctx) {
447                 bpi_cipher_ctx_free(session->bpi_ctx);
448                 session->bpi_ctx = NULL;
449         }
450         return NULL;
451 }
452
453 int
454 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
455                 struct rte_crypto_sym_xform *xform,
456                 struct rte_cryptodev_sym_session *sess,
457                 struct rte_mempool *mempool)
458 {
459         void *sess_private_data;
460
461         if (rte_mempool_get(mempool, &sess_private_data)) {
462                 CDEV_LOG_ERR(
463                         "Couldn't get object from session mempool");
464                 return -1;
465         }
466
467         if (qat_crypto_set_session_parameters(dev, xform, sess_private_data) != 0) {
468                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
469                                 "session parameters");
470
471                 /* Return session to mempool */
472                 rte_mempool_put(mempool, sess_private_data);
473                 return -1;
474         }
475
476         set_session_private_data(sess, dev->driver_id,
477                 sess_private_data);
478
479         return 0;
480 }
481
482 int
483 qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
484                 struct rte_crypto_sym_xform *xform, void *session_private)
485 {
486         struct qat_session *session = session_private;
487
488         int qat_cmd_id;
489         PMD_INIT_FUNC_TRACE();
490
491         /* Set context descriptor physical address */
492         session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
493                         offsetof(struct qat_session, cd);
494
495         /* Get requested QAT command id */
496         qat_cmd_id = qat_get_cmd_id(xform);
497         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
498                 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
499                 goto error_out;
500         }
501         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
502         switch (session->qat_cmd) {
503         case ICP_QAT_FW_LA_CMD_CIPHER:
504         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
505                 break;
506         case ICP_QAT_FW_LA_CMD_AUTH:
507         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
508                 break;
509         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
510                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
511                         session = qat_crypto_sym_configure_session_aead(xform,
512                                         session);
513                 else {
514                         session = qat_crypto_sym_configure_session_cipher(dev,
515                                         xform, session);
516                         session = qat_crypto_sym_configure_session_auth(dev,
517                                         xform, session);
518                 }
519                 break;
520         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
521                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
522                         session = qat_crypto_sym_configure_session_aead(xform,
523                                         session);
524                 else {
525                         session = qat_crypto_sym_configure_session_auth(dev,
526                                         xform, session);
527                         session = qat_crypto_sym_configure_session_cipher(dev,
528                                         xform, session);
529                 }
530                 break;
531         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
532         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
533         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
534         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
535         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
536         case ICP_QAT_FW_LA_CMD_MGF1:
537         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
538         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
539         case ICP_QAT_FW_LA_CMD_DELIMITER:
540         PMD_DRV_LOG(ERR, "Unsupported Service %u",
541                 session->qat_cmd);
542                 goto error_out;
543         default:
544         PMD_DRV_LOG(ERR, "Unsupported Service %u",
545                 session->qat_cmd);
546                 goto error_out;
547         }
548
549         return 0;
550
551 error_out:
552         return -1;
553 }
554
555 struct qat_session *
556 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
557                                 struct rte_crypto_sym_xform *xform,
558                                 struct qat_session *session_private)
559 {
560
561         struct qat_session *session = session_private;
562         struct rte_crypto_auth_xform *auth_xform = NULL;
563         struct qat_pmd_private *internals = dev->data->dev_private;
564         auth_xform = qat_get_auth_xform(xform);
565         uint8_t *key_data = auth_xform->key.data;
566         uint8_t key_length = auth_xform->key.length;
567
568         switch (auth_xform->algo) {
569         case RTE_CRYPTO_AUTH_SHA1_HMAC:
570                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
571                 break;
572         case RTE_CRYPTO_AUTH_SHA224_HMAC:
573                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
574                 break;
575         case RTE_CRYPTO_AUTH_SHA256_HMAC:
576                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
577                 break;
578         case RTE_CRYPTO_AUTH_SHA384_HMAC:
579                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
580                 break;
581         case RTE_CRYPTO_AUTH_SHA512_HMAC:
582                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
583                 break;
584         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
585                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
586                 break;
587         case RTE_CRYPTO_AUTH_AES_GMAC:
588                 if (qat_alg_validate_aes_key(auth_xform->key.length,
589                                 &session->qat_cipher_alg) != 0) {
590                         PMD_DRV_LOG(ERR, "Invalid AES key size");
591                         goto error_out;
592                 }
593                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
594                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
595
596                 break;
597         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
598                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
599                 break;
600         case RTE_CRYPTO_AUTH_MD5_HMAC:
601                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
602                 break;
603         case RTE_CRYPTO_AUTH_NULL:
604                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
605                 break;
606         case RTE_CRYPTO_AUTH_KASUMI_F9:
607                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
608                 break;
609         case RTE_CRYPTO_AUTH_ZUC_EIA3:
610                 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
611                         PMD_DRV_LOG(ERR, "%s not supported on this device",
612                                 rte_crypto_auth_algorithm_strings
613                                 [auth_xform->algo]);
614                         goto error_out;
615                 }
616                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
617                 break;
618         case RTE_CRYPTO_AUTH_SHA1:
619         case RTE_CRYPTO_AUTH_SHA256:
620         case RTE_CRYPTO_AUTH_SHA512:
621         case RTE_CRYPTO_AUTH_SHA224:
622         case RTE_CRYPTO_AUTH_SHA384:
623         case RTE_CRYPTO_AUTH_MD5:
624         case RTE_CRYPTO_AUTH_AES_CMAC:
625         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
626                 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
627                                 auth_xform->algo);
628                 goto error_out;
629         default:
630                 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
631                                 auth_xform->algo);
632                 goto error_out;
633         }
634
635         session->auth_iv.offset = auth_xform->iv.offset;
636         session->auth_iv.length = auth_xform->iv.length;
637
638         if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
639                 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
640                         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
641                         session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
642                         /*
643                          * The cipher descriptor content must be created first,
644                          * then the authentication descriptor
645                          */
646                         if (qat_alg_aead_session_create_content_desc_cipher(session,
647                                                 auth_xform->key.data,
648                                                 auth_xform->key.length))
649                                 goto error_out;
650
651                         if (qat_alg_aead_session_create_content_desc_auth(session,
652                                                 key_data,
653                                                 key_length,
654                                                 0,
655                                                 auth_xform->digest_length,
656                                                 auth_xform->op))
657                                 goto error_out;
658                 } else {
659                         session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
660                         session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
661                         /*
662                          * The authentication descriptor content must be created first,
663                          * then the cipher descriptor
664                          */
665                         if (qat_alg_aead_session_create_content_desc_auth(session,
666                                         key_data,
667                                         key_length,
668                                         0,
669                                         auth_xform->digest_length,
670                                         auth_xform->op))
671                                 goto error_out;
672
673                         if (qat_alg_aead_session_create_content_desc_cipher(session,
674                                                 auth_xform->key.data,
675                                                 auth_xform->key.length))
676                                 goto error_out;
677                 }
678                 /* Restore to authentication only */
679                 session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
680         } else {
681                 if (qat_alg_aead_session_create_content_desc_auth(session,
682                                 key_data,
683                                 key_length,
684                                 0,
685                                 auth_xform->digest_length,
686                                 auth_xform->op))
687                         goto error_out;
688         }
689
690         session->digest_length = auth_xform->digest_length;
691         return session;
692
693 error_out:
694         return NULL;
695 }
696
697 struct qat_session *
698 qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
699                                 struct qat_session *session_private)
700 {
701         struct qat_session *session = session_private;
702         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
703
704         /*
705          * Store AEAD IV parameters as cipher IV,
706          * to avoid unnecessary memory usage
707          */
708         session->cipher_iv.offset = xform->aead.iv.offset;
709         session->cipher_iv.length = xform->aead.iv.length;
710
711         switch (aead_xform->algo) {
712         case RTE_CRYPTO_AEAD_AES_GCM:
713                 if (qat_alg_validate_aes_key(aead_xform->key.length,
714                                 &session->qat_cipher_alg) != 0) {
715                         PMD_DRV_LOG(ERR, "Invalid AES key size");
716                         goto error_out;
717                 }
718                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
719                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
720                 break;
721         case RTE_CRYPTO_AEAD_AES_CCM:
722                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
723                                 aead_xform->algo);
724                 goto error_out;
725         default:
726                 PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
727                                 aead_xform->algo);
728                 goto error_out;
729         }
730
731         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
732                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
733                 /*
734                  * The cipher descriptor content must be created first,
735                  * then the authentication descriptor
736                  */
737                 if (qat_alg_aead_session_create_content_desc_cipher(session,
738                                         aead_xform->key.data,
739                                         aead_xform->key.length))
740                         goto error_out;
741
742                 if (qat_alg_aead_session_create_content_desc_auth(session,
743                                         aead_xform->key.data,
744                                         aead_xform->key.length,
745                                         aead_xform->add_auth_data_length,
746                                         aead_xform->digest_length,
747                                         RTE_CRYPTO_AUTH_OP_GENERATE))
748                         goto error_out;
749         } else {
750                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
751                 /*
752                  * The authentication descriptor content must be created first,
753                  * then the cipher descriptor
754                  */
755                 if (qat_alg_aead_session_create_content_desc_auth(session,
756                                         aead_xform->key.data,
757                                         aead_xform->key.length,
758                                         aead_xform->add_auth_data_length,
759                                         aead_xform->digest_length,
760                                         RTE_CRYPTO_AUTH_OP_VERIFY))
761                         goto error_out;
762
763                 if (qat_alg_aead_session_create_content_desc_cipher(session,
764                                         aead_xform->key.data,
765                                         aead_xform->key.length))
766                         goto error_out;
767         }
768
769         session->digest_length = aead_xform->digest_length;
770         return session;
771
772 error_out:
773         return NULL;
774 }
775
776 unsigned qat_crypto_sym_get_session_private_size(
777                 struct rte_cryptodev *dev __rte_unused)
778 {
779         return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
780 }
781
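/* DOCSIS BPI decrypt pre-processing: if the cipher region ends in a partial
 * block, decrypt that trailing fragment in software (CFB via the ECB ctx,
 * using the previous ciphertext block - or the op IV for a runt - as IV) and
 * return the length of the remaining complete blocks for the hardware.
 */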
782 static inline uint32_t
783 qat_bpicipher_preprocess(struct qat_session *ctx,
784                                 struct rte_crypto_op *op)
785 {
786         uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
787         struct rte_crypto_sym_op *sym_op = op->sym;
788         uint8_t last_block_len = block_len > 0 ?
789                         sym_op->cipher.data.length % block_len : 0;
790
791         if (last_block_len &&
792                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
793
794                 /* Decrypt last block */
795                 uint8_t *last_block, *dst, *iv;
796                 uint32_t last_block_offset = sym_op->cipher.data.offset +
797                                 sym_op->cipher.data.length - last_block_len;
798                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
799                                 uint8_t *, last_block_offset);
800
801                 if (unlikely(sym_op->m_dst != NULL))
802                         /* out-of-place operation (OOP) */
803                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
804                                                 uint8_t *, last_block_offset);
805                 else
806                         dst = last_block;
807
808                 if (last_block_len < sym_op->cipher.data.length)
809                         /* use previous block ciphertext as IV */
810                         iv = last_block - block_len;
811                 else
812                         /* runt block, i.e. less than one full block */
813                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
814                                         ctx->cipher_iv.offset);
815
816 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
817                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
818                         last_block_len);
819                 if (sym_op->m_dst != NULL)
820                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
821                                 last_block_len);
822 #endif
823                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
824                                 last_block_len, ctx->bpi_ctx);
825 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
826                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
827                         last_block_len);
828                 if (sym_op->m_dst != NULL)
829                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
830                                 last_block_len);
831 #endif
832         }
833
834         return sym_op->cipher.data.length - last_block_len;
835 }
836
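/* DOCSIS BPI encrypt post-processing: after the hardware has encrypted the
 * complete blocks, encrypt any trailing partial block in software, using the
 * last full ciphertext block (or the op IV for a runt) as the CFB IV.
 */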
837 static inline uint32_t
838 qat_bpicipher_postprocess(struct qat_session *ctx,
839                                 struct rte_crypto_op *op)
840 {
841         uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
842         struct rte_crypto_sym_op *sym_op = op->sym;
843         uint8_t last_block_len = block_len > 0 ?
844                         sym_op->cipher.data.length % block_len : 0;
845
846         if (last_block_len > 0 &&
847                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
848
849                 /* Encrypt last block */
850                 uint8_t *last_block, *dst, *iv;
851                 uint32_t last_block_offset;
852
853                 last_block_offset = sym_op->cipher.data.offset +
854                                 sym_op->cipher.data.length - last_block_len;
855                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
856                                 uint8_t *, last_block_offset);
857
858                 if (unlikely(sym_op->m_dst != NULL))
859                         /* out-of-place operation (OOP) */
860                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
861                                                 uint8_t *, last_block_offset);
862                 else
863                         dst = last_block;
864
865                 if (last_block_len < sym_op->cipher.data.length)
866                         /* use previous block ciphertext as IV */
867                         iv = dst - block_len;
868                 else
869                         /* runt block, i.e. less than one full block */
870                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
871                                         ctx->cipher_iv.offset);
872
873 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
874                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
875                         last_block_len);
876                 if (sym_op->m_dst != NULL)
877                         rte_hexdump(stdout, "BPI: dst before post-process:",
878                                         dst, last_block_len);
879 #endif
880                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
881                                 last_block_len, ctx->bpi_ctx);
882 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
883                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
884                         last_block_len);
885                 if (sym_op->m_dst != NULL)
886                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
887                                 last_block_len);
888 #endif
889         }
890         return sym_op->cipher.data.length - last_block_len;
891 }
892
893 uint16_t
894 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
895                 uint16_t nb_ops)
896 {
897         register struct qat_queue *queue;
898         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
899         register uint32_t nb_ops_sent = 0;
900         register struct rte_crypto_op **cur_op = ops;
901         register int ret;
902         uint16_t nb_ops_possible = nb_ops;
903         register uint8_t *base_addr;
904         register uint32_t tail;
905         int overflow;
906
907         if (unlikely(nb_ops == 0))
908                 return 0;
909
910         /* read parameters used heavily in the main loop into registers */
911         queue = &(tmp_qp->tx_q);
912         base_addr = (uint8_t *)queue->base_addr;
913         tail = queue->tail;
914
915         /* Find how many can actually fit on the ring */
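        /* inflights16 is incremented optimistically by nb_ops; if that
         * overshoots max_inflights the excess is subtracted back and only
         * the remaining slots are used for this burst.
         */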
916         overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
917                                 - queue->max_inflights;
918         if (overflow > 0) {
919                 rte_atomic16_sub(&tmp_qp->inflights16, overflow);
920                 nb_ops_possible = nb_ops - overflow;
921                 if (nb_ops_possible == 0)
922                         return 0;
923         }
924
925         while (nb_ops_sent != nb_ops_possible) {
926                 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
927                                 tmp_qp->op_cookies[tail / queue->msg_size]);
928                 if (ret != 0) {
929                         tmp_qp->stats.enqueue_err_count++;
930                         /*
931                          * This message cannot be enqueued,
932                          * so remove the unsent ops from the in-flight count
933                          */
934                         rte_atomic16_sub(&tmp_qp->inflights16,
935                                         nb_ops_possible - nb_ops_sent);
936                         if (nb_ops_sent == 0)
937                                 return 0;
938                         goto kick_tail;
939                 }
940
941                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
942                 nb_ops_sent++;
943                 cur_op++;
944         }
945 kick_tail:
946         WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
947                         queue->hw_queue_number, tail);
948         queue->tail = tail;
949         tmp_qp->stats.enqueued_count += nb_ops_sent;
950         return nb_ops_sent;
951 }
952
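/* Dequeue: walk the response ring until the empty signature or nb_ops
 * responses are seen. Each response's opaque_data is the originating
 * rte_crypto_op; BPI sessions get their trailing block encrypted here, the
 * ring slot is re-marked empty, and the CSR head and in-flight count are
 * updated once at the end of the burst.
 */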
953 uint16_t
954 qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
955                 uint16_t nb_ops)
956 {
957         struct qat_queue *queue;
958         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
959         uint32_t msg_counter = 0;
960         struct rte_crypto_op *rx_op;
961         struct icp_qat_fw_comn_resp *resp_msg;
962
963         queue = &(tmp_qp->rx_q);
964         resp_msg = (struct icp_qat_fw_comn_resp *)
965                         ((uint8_t *)queue->base_addr + queue->head);
966
967         while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
968                         msg_counter != nb_ops) {
969                 rx_op = (struct rte_crypto_op *)(uintptr_t)
970                                 (resp_msg->opaque_data);
971
972 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
973                 rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
974                         sizeof(struct icp_qat_fw_comn_resp));
975
976 #endif
977                 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
978                                 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
979                                         resp_msg->comn_hdr.comn_status)) {
980                         rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
981                 } else {
982                         struct qat_session *sess = (struct qat_session *)
983                                         get_session_private_data(
984                                         rx_op->sym->session,
985                                         cryptodev_qat_driver_id);
986
987                         if (sess->bpi_ctx)
988                                 qat_bpicipher_postprocess(sess, rx_op);
989                         rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
990                 }
991
992                 *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
993                 queue->head = adf_modulo(queue->head +
994                                 queue->msg_size,
995                                 ADF_RING_SIZE_MODULO(queue->queue_size));
996                 resp_msg = (struct icp_qat_fw_comn_resp *)
997                                         ((uint8_t *)queue->base_addr +
998                                                         queue->head);
999                 *ops = rx_op;
1000                 ops++;
1001                 msg_counter++;
1002         }
1003         if (msg_counter > 0) {
1004                 WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
1005                                         queue->hw_bundle_number,
1006                                         queue->hw_queue_number, queue->head);
1007                 rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
1008                 tmp_qp->stats.dequeued_count += msg_counter;
1009         }
1010         return msg_counter;
1011 }
1012
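/* Build the QAT scatter-gather list for a chained mbuf: the first entry
 * starts at buff_start, subsequent entries follow the mbuf chain, and the
 * last entry is trimmed so the total covers exactly data_len bytes; fails if
 * more than QAT_SGL_MAX_NUMBER entries would be needed.
 */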
1013 static inline int
1014 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
1015                 struct qat_alg_buf_list *list, uint32_t data_len)
1016 {
1017         int nr = 1;
1018
1019         uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
1020                         buff_start + rte_pktmbuf_data_len(buf);
1021
1022         list->bufers[0].addr = buff_start;
1023         list->bufers[0].resrvd = 0;
1024         list->bufers[0].len = buf_len;
1025
1026         if (data_len <= buf_len) {
1027                 list->num_bufs = nr;
1028                 list->bufers[0].len = data_len;
1029                 return 0;
1030         }
1031
1032         buf = buf->next;
1033         while (buf) {
1034                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
1035                         PMD_DRV_LOG(ERR, "QAT PMD exceeded maximum number of QAT SGL"
1036                                         " entries (%u)",
1037                                         QAT_SGL_MAX_NUMBER);
1038                         return -EINVAL;
1039                 }
1040
1041                 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
1042                 list->bufers[nr].resrvd = 0;
1043                 list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
1044
1045                 buf_len += list->bufers[nr].len;
1046                 buf = buf->next;
1047
1048                 if (buf_len > data_len) {
1049                         list->bufers[nr].len -=
1050                                 buf_len - data_len;
1051                         buf = NULL;
1052                 }
1053                 ++nr;
1054         }
1055         list->num_bufs = nr;
1056
1057         return 0;
1058 }
1059
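/* Place the cipher IV in the request: copy it inline into cipher_IV_array
 * when it fits, otherwise set the 64-bit-pointer flag and pass the physical
 * address of the IV held in the op's private data area.
 */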
1060 static inline void
1061 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
1062                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1063                 struct rte_crypto_op *op,
1064                 struct icp_qat_fw_la_bulk_req *qat_req)
1065 {
1066         /* copy IV into request if it fits */
1067         if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
1068                 rte_memcpy(cipher_param->u.cipher_IV_array,
1069                                 rte_crypto_op_ctod_offset(op, uint8_t *,
1070                                         iv_offset),
1071                                 iv_length);
1072         } else {
1073                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
1074                                 qat_req->comn_hdr.serv_specif_flags,
1075                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
1076                 cipher_param->u.s.cipher_IV_ptr =
1077                                 rte_crypto_op_ctophys_offset(op,
1078                                         iv_offset);
1079         }
1080 }
1081
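/* Build one firmware LA bulk request for a crypto op: copy the session's
 * request template, work out cipher/auth/AEAD offsets and lengths (bit to
 * byte conversion for SNOW3G/KASUMI/ZUC), set the IVs, digest and AAD
 * addresses, and choose flat-buffer or SGL addressing for src/dst.
 */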
1082 static inline int
1083 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
1084                 struct qat_crypto_op_cookie *qat_op_cookie)
1085 {
1086         int ret = 0;
1087         struct qat_session *ctx;
1088         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1089         struct icp_qat_fw_la_auth_req_params *auth_param;
1090         register struct icp_qat_fw_la_bulk_req *qat_req;
1091         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
1092         uint32_t cipher_len = 0, cipher_ofs = 0;
1093         uint32_t auth_len = 0, auth_ofs = 0;
1094         uint32_t min_ofs = 0;
1095         uint64_t src_buf_start = 0, dst_buf_start = 0;
1096         uint8_t do_sgl = 0;
1097
1098 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1099         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
1100                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
1101                                 "operation requests, op (%p) is not a "
1102                                 "symmetric operation.", op);
1103                 return -EINVAL;
1104         }
1105 #endif
1106         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
1107                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
1108                                 " requests, op (%p) is sessionless.", op);
1109                 return -EINVAL;
1110         }
1111
1112         ctx = (struct qat_session *)get_session_private_data(
1113                         op->sym->session, cryptodev_qat_driver_id);
1114
1115         if (unlikely(ctx == NULL)) {
1116                 PMD_DRV_LOG(ERR, "Session was not created for this device");
1117                 return -EINVAL;
1118         }
1119
1120         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
1121         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
1122         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
1123         cipher_param = (void *)&qat_req->serv_specif_rqpars;
1124         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
1125
1126         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
1127                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1128                 /* AES-GCM */
1129                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1130                                 ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1131                         do_aead = 1;
1132                 } else {
1133                         do_auth = 1;
1134                         do_cipher = 1;
1135                 }
1136         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1137                 do_auth = 1;
1138                 do_cipher = 0;
1139         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1140                 do_auth = 0;
1141                 do_cipher = 1;
1142         }
1143
1144         if (do_cipher) {
1145
1146                 if (ctx->qat_cipher_alg ==
1147                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
1148                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
1149                         ctx->qat_cipher_alg ==
1150                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1151
1152                         if (unlikely(
1153                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
1154                                  || (cipher_param->cipher_offset
1155                                                         % BYTE_LENGTH != 0))) {
1156                                 PMD_DRV_LOG(ERR,
1157                   "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1158                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1159                                 return -EINVAL;
1160                         }
1161                         cipher_len = op->sym->cipher.data.length >> 3;
1162                         cipher_ofs = op->sym->cipher.data.offset >> 3;
1163
1164                 } else if (ctx->bpi_ctx) {
1165                         /* DOCSIS - only send complete blocks to the device.
1166                          * Any partial trailing block is processed here in CFB mode.
1167                          * Even with 0 complete blocks, the request is still sent to
1168                          * the device so it reaches the rx queue for post-processing.
1169                          */
1170                         cipher_len = qat_bpicipher_preprocess(ctx, op);
1171                         cipher_ofs = op->sym->cipher.data.offset;
1172                 } else {
1173                         cipher_len = op->sym->cipher.data.length;
1174                         cipher_ofs = op->sym->cipher.data.offset;
1175                 }
1176
1177                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1178                                 cipher_param, op, qat_req);
1179                 min_ofs = cipher_ofs;
1180         }
1181
1182         if (do_auth) {
1183
1184                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
1185                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
1186                         ctx->qat_hash_alg ==
1187                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
1188                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
1189                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
1190                                 PMD_DRV_LOG(ERR,
1191                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1192                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1193                                 return -EINVAL;
1194                         }
1195                         auth_ofs = op->sym->auth.data.offset >> 3;
1196                         auth_len = op->sym->auth.data.length >> 3;
1197
1198                         if (ctx->qat_hash_alg ==
1199                                         ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
1200                                 if (do_cipher) {
1201                                         auth_len = auth_len + auth_ofs + 1 -
1202                                                 ICP_QAT_HW_KASUMI_BLK_SZ;
1203                                         auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
1204                                 } else {
1205                                         auth_len = auth_len + auth_ofs + 1;
1206                                         auth_ofs = 0;
1207                                 }
1208                         } else
1209                                 auth_param->u1.aad_adr =
1210                                         rte_crypto_op_ctophys_offset(op,
1211                                                         ctx->auth_iv.offset);
1212
1213                 } else if (ctx->qat_hash_alg ==
1214                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1215                                 ctx->qat_hash_alg ==
1216                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1217                         /* AES-GMAC */
1218                         set_cipher_iv(ctx->auth_iv.length,
1219                                 ctx->auth_iv.offset,
1220                                 cipher_param, op, qat_req);
1221                         auth_ofs = op->sym->auth.data.offset;
1222                         auth_len = op->sym->auth.data.length;
1223
1224                         auth_param->u1.aad_adr = 0;
1225                         auth_param->u2.aad_sz = 0;
1226
1227                         /*
1228                          * If len(iv)==12B fw computes J0
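                         * (a 96-bit IV gives J0 = IV || 0^31 || 1, so no
                         * GHASH pass over the IV is required)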
1229                          */
1230                         if (ctx->auth_iv.length == 12) {
1231                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1232                                         qat_req->comn_hdr.serv_specif_flags,
1233                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1234
1235                         }
1236                 } else {
1237                         auth_ofs = op->sym->auth.data.offset;
1238                         auth_len = op->sym->auth.data.length;
1239
1240                 }
1241                 min_ofs = auth_ofs;
1242
1243                 auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
1244
1245         }
1246
1247         if (do_aead) {
1248                 if (ctx->qat_hash_alg ==
1249                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1250                                 ctx->qat_hash_alg ==
1251                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1252                         /*
1253                          * If len(iv)==12B fw computes J0
1254                          */
1255                         if (ctx->cipher_iv.length == 12) {
1256                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1257                                         qat_req->comn_hdr.serv_specif_flags,
1258                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1259                         }
1260
1261                 }
1262
1263                 cipher_len = op->sym->aead.data.length;
1264                 cipher_ofs = op->sym->aead.data.offset;
1265                 auth_len = op->sym->aead.data.length;
1266                 auth_ofs = op->sym->aead.data.offset;
1267
1268                 auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
1269                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
1270                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1271                                 cipher_param, op, qat_req);
1272                 min_ofs = op->sym->aead.data.offset;
1273         }
1274
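        /* A chained mbuf on either src or dst forces the SGL request format */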
1275         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1276                 do_sgl = 1;
1277
1278         /* adjust for chain case */
1279         if (do_cipher && do_auth)
1280                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1281
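        /* An offset beyond the first segment cannot be used as the DMA start;
         * with SGL the whole chain is walked anyway, so fall back to offset 0.
         */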
1282         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1283                 min_ofs = 0;
1284
1285         if (unlikely(op->sym->m_dst != NULL)) {
1286                 /* Out-of-place operation (OOP)
1287                  * Don't align the DMA start address; DMA only the minimum
1288                  * data set so data in the destination buffer is not overwritten
1289                  */
1290                 src_buf_start =
1291                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
1292                 dst_buf_start =
1293                         rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
1294
1295         } else {
1296                 /* In-place operation
1297                  * Start DMA at nearest aligned address below min_ofs
1298                  */
1299                 src_buf_start =
1300                         rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
1301                                                 & QAT_64_BTYE_ALIGN_MASK;
1302
1303                 if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
1304                                         rte_pktmbuf_headroom(op->sym->m_src))
1305                                                         > src_buf_start)) {
1306                         /* alignment pushed the address below the start of the
1307                          * mbuf buffer, so revert and take the performance hit
1308                          */
1309                         src_buf_start =
1310                                 rte_pktmbuf_mtophys_offset(op->sym->m_src,
1311                                                                 min_ofs);
1312                 }
1313                 dst_buf_start = src_buf_start;
1314         }
1315
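        /* Express cipher and auth offsets relative to the chosen DMA start address */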
1316         if (do_cipher || do_aead) {
1317                 cipher_param->cipher_offset =
1318                                 (uint32_t)rte_pktmbuf_mtophys_offset(
1319                                 op->sym->m_src, cipher_ofs) - src_buf_start;
1320                 cipher_param->cipher_length = cipher_len;
1321         } else {
1322                 cipher_param->cipher_offset = 0;
1323                 cipher_param->cipher_length = 0;
1324         }
1325
1326         if (do_auth || do_aead) {
1327                 auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
1328                                 op->sym->m_src, auth_ofs) - src_buf_start;
1329                 auth_param->auth_len = auth_len;
1330         } else {
1331                 auth_param->auth_off = 0;
1332                 auth_param->auth_len = 0;
1333         }
1334
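        /* The request length must cover whichever of the cipher or auth
         * regions extends furthest from the DMA start address.
         */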
1335         qat_req->comn_mid.dst_length =
1336                 qat_req->comn_mid.src_length =
1337                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1338                 > (auth_param->auth_off + auth_param->auth_len) ?
1339                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1340                 : (auth_param->auth_off + auth_param->auth_len);
1341
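        /* SGL: build the firmware scatter-gather list(s) from the mbuf chain(s);
         * otherwise pass the flat buffer addresses directly.
         */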
1342         if (do_sgl) {
1343
1344                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1345                                 QAT_COMN_PTR_TYPE_SGL);
1346                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1347                                 &qat_op_cookie->qat_sgl_list_src,
1348                                 qat_req->comn_mid.src_length);
1349                 if (ret) {
1350                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1351                         return ret;
1352                 }
1353
1354                 if (likely(op->sym->m_dst == NULL))
1355                         qat_req->comn_mid.dest_data_addr =
1356                                 qat_req->comn_mid.src_data_addr =
1357                                 qat_op_cookie->qat_sgl_src_phys_addr;
1358                 else {
1359                         ret = qat_sgl_fill_array(op->sym->m_dst,
1360                                         dst_buf_start,
1361                                         &qat_op_cookie->qat_sgl_list_dst,
1362                                                 qat_req->comn_mid.dst_length);
1363
1364                         if (ret) {
1365                                 PMD_DRV_LOG(ERR,
1366                                                 "QAT PMD Cannot fill sgl array");
1367                                 return ret;
1368                         }
1369
1370                         qat_req->comn_mid.src_data_addr =
1371                                 qat_op_cookie->qat_sgl_src_phys_addr;
1372                         qat_req->comn_mid.dest_data_addr =
1373                                         qat_op_cookie->qat_sgl_dst_phys_addr;
1374                 }
1375         } else {
1376                 qat_req->comn_mid.src_data_addr = src_buf_start;
1377                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1378         }
1379
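        /* Debug-TX build only: dump the assembled request, source data, IVs,
         * digest and AAD to stdout.
         */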
1380 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1381         rte_hexdump(stdout, "qat_req:", qat_req,
1382                         sizeof(struct icp_qat_fw_la_bulk_req));
1383         rte_hexdump(stdout, "src_data:",
1384                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1385                         rte_pktmbuf_data_len(op->sym->m_src));
1386         if (do_cipher) {
1387                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
1388                                                 uint8_t *,
1389                                                 ctx->cipher_iv.offset);
1390                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
1391                                 ctx->cipher_iv.length);
1392         }
1393
1394         if (do_auth) {
1395                 if (ctx->auth_iv.length) {
1396                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
1397                                                         uint8_t *,
1398                                                         ctx->auth_iv.offset);
1399                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
1400                                                 ctx->auth_iv.length);
1401                 }
1402                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1403                                 ctx->digest_length);
1404         }
1405
1406         if (do_aead) {
1407                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
1408                                 ctx->digest_length);
1409                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
1410                                 ctx->aad_len);
1411         }
1412 #endif
1413         return 0;
1414 }
1415
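/*
 * Returns data % (1 << shift) without a divide,
 * e.g. adf_modulo(17, 4) == 17 % 16 == 1.
 */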
1416 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1417 {
1418         uint32_t div = data >> shift;
1419         uint32_t mult = div << shift;
1420
1421         return data - mult;
1422 }
1423
1424 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1425                 __rte_unused struct rte_cryptodev_config *config)
1426 {
1427         PMD_INIT_FUNC_TRACE();
1428         return 0;
1429 }
1430
1431 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1432 {
1433         PMD_INIT_FUNC_TRACE();
1434         return 0;
1435 }
1436
1437 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1438 {
1439         PMD_INIT_FUNC_TRACE();
1440 }
1441
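/* Release every queue pair before the device is closed */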
1442 int qat_dev_close(struct rte_cryptodev *dev)
1443 {
1444         int i, ret;
1445
1446         PMD_INIT_FUNC_TRACE();
1447
1448         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1449                 ret = qat_crypto_sym_qp_release(dev, i);
1450                 if (ret < 0)
1451                         return ret;
1452         }
1453
1454         return 0;
1455 }
1456
1457 void qat_dev_info_get(struct rte_cryptodev *dev,
1458                         struct rte_cryptodev_info *info)
1459 {
1460         struct qat_pmd_private *internals = dev->data->dev_private;
1461
1462         PMD_INIT_FUNC_TRACE();
1463         if (info != NULL) {
1464                 info->max_nb_queue_pairs =
1465                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
1466                                 ADF_NUM_BUNDLES_PER_DEV;
1467                 info->feature_flags = dev->feature_flags;
1468                 info->capabilities = internals->qat_dev_capabilities;
1469                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1470                 info->driver_id = cryptodev_qat_driver_id;
1471                 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1472         }
1473 }
1474
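/* Sum per-queue-pair enqueue/dequeue and error counters into the device stats */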
1475 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1476                 struct rte_cryptodev_stats *stats)
1477 {
1478         int i;
1479         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1480
1481         PMD_INIT_FUNC_TRACE();
1482         if (stats == NULL) {
1483                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1484                 return;
1485         }
1486         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1487                 if (qp[i] == NULL) {
1488                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1489                         continue;
1490                 }
1491
1492                 stats->enqueued_count += qp[i]->stats.enqueued_count;
1493                 stats->dequeued_count += qp[i]->stats.dequeued_count;
1494                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1495                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1496         }
1497 }
1498
1499 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1500 {
1501         int i;
1502         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1503
1504         PMD_INIT_FUNC_TRACE();
1505         for (i = 0; i < dev->data->nb_queue_pairs; i++)
1506                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
1507         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1508 }