remove unused ring includes
[dpdk.git] / drivers / crypto / qat / qat_crypto.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *       * Redistributions of source code must retain the above copyright
12  *         notice, this list of conditions and the following disclaimer.
13  *       * Redistributions in binary form must reproduce the above copyright
14  *         notice, this list of conditions and the following disclaimer in
15  *         the documentation and/or other materials provided with the
16  *         distribution.
17  *       * Neither the name of Intel Corporation nor the names of its
18  *         contributors may be used to endorse or promote products derived
19  *         from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <string.h>
38 #include <inttypes.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_tailq.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_launch.h>
52 #include <rte_eal.h>
53 #include <rte_per_lcore.h>
54 #include <rte_lcore.h>
55 #include <rte_atomic.h>
56 #include <rte_branch_prediction.h>
57 #include <rte_mempool.h>
58 #include <rte_mbuf.h>
59 #include <rte_string_fns.h>
60 #include <rte_spinlock.h>
61 #include <rte_hexdump.h>
62
63 #include "qat_logs.h"
64 #include "qat_algs.h"
65 #include "qat_crypto.h"
66 #include "adf_transport_access_macros.h"
67
68 #define BYTE_LENGTH    8
69
/*
 * Capability table advertised to the cryptodev framework.
 *
 * One entry per supported symmetric algorithm; for auth algorithms the
 * key/digest/aad size ranges are given in bytes, for cipher algorithms
 * the key/iv ranges.  An .increment of 0 means only the single size
 * between .min and .max is accepted; a non-zero increment enumerates
 * every size .min, .min+increment, ..., .max.
 * The list is terminated by RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST().
 */
static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 20,
					.max = 20,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 32,
					.max = 32,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 128,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* SNOW3G (UIA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 4,
					.increment = 0
				},
				/* UIA2 carries the IV in the AAD field */
				.aad_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SNOW3G (UEA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
287
288 static inline uint32_t
289 adf_modulo(uint32_t data, uint32_t shift);
290
291 static inline int
292 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
293
294 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
295                 void *session)
296 {
297         struct qat_session *sess = session;
298         phys_addr_t cd_paddr;
299
300         PMD_INIT_FUNC_TRACE();
301         if (session) {
302                 cd_paddr = sess->cd_paddr;
303                 memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
304                 sess->cd_paddr = cd_paddr;
305         } else
306                 PMD_DRV_LOG(ERR, "NULL session");
307 }
308
309 static int
310 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
311 {
312         /* Cipher Only */
313         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
314                 return ICP_QAT_FW_LA_CMD_CIPHER;
315
316         /* Authentication Only */
317         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
318                 return ICP_QAT_FW_LA_CMD_AUTH;
319
320         if (xform->next == NULL)
321                 return -1;
322
323         /* Cipher then Authenticate */
324         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
325                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
326                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
327
328         /* Authenticate then Cipher */
329         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
330                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
331                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
332
333         return -1;
334 }
335
336 static struct rte_crypto_auth_xform *
337 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
338 {
339         do {
340                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
341                         return &xform->auth;
342
343                 xform = xform->next;
344         } while (xform);
345
346         return NULL;
347 }
348
349 static struct rte_crypto_cipher_xform *
350 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
351 {
352         do {
353                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
354                         return &xform->cipher;
355
356                 xform = xform->next;
357         } while (xform);
358
359         return NULL;
360 }
361 void *
362 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
363                 struct rte_crypto_sym_xform *xform, void *session_private)
364 {
365         struct qat_pmd_private *internals = dev->data->dev_private;
366
367         struct qat_session *session = session_private;
368
369         struct rte_crypto_cipher_xform *cipher_xform = NULL;
370
371         /* Get cipher xform from crypto xform chain */
372         cipher_xform = qat_get_cipher_xform(xform);
373
374         switch (cipher_xform->algo) {
375         case RTE_CRYPTO_CIPHER_AES_CBC:
376                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
377                                 &session->qat_cipher_alg) != 0) {
378                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
379                         goto error_out;
380                 }
381                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
382                 break;
383         case RTE_CRYPTO_CIPHER_AES_GCM:
384                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
385                                 &session->qat_cipher_alg) != 0) {
386                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
387                         goto error_out;
388                 }
389                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
390                 break;
391         case RTE_CRYPTO_CIPHER_AES_CTR:
392                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
393                                 &session->qat_cipher_alg) != 0) {
394                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
395                         goto error_out;
396                 }
397                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
398                 break;
399         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
400                 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
401                                         &session->qat_cipher_alg) != 0) {
402                         PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size");
403                         goto error_out;
404                 }
405                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
406                 break;
407         case RTE_CRYPTO_CIPHER_NULL:
408         case RTE_CRYPTO_CIPHER_3DES_ECB:
409         case RTE_CRYPTO_CIPHER_3DES_CBC:
410         case RTE_CRYPTO_CIPHER_AES_ECB:
411         case RTE_CRYPTO_CIPHER_AES_CCM:
412         case RTE_CRYPTO_CIPHER_KASUMI_F8:
413                 PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
414                                 cipher_xform->algo);
415                 goto error_out;
416         default:
417                 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
418                                 cipher_xform->algo);
419                 goto error_out;
420         }
421
422         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
423                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
424         else
425                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
426
427         if (qat_alg_aead_session_create_content_desc_cipher(session,
428                                                 cipher_xform->key.data,
429                                                 cipher_xform->key.length))
430                 goto error_out;
431
432         return session;
433
434 error_out:
435         rte_mempool_put(internals->sess_mp, session);
436         return NULL;
437 }
438
439
440 void *
441 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
442                 struct rte_crypto_sym_xform *xform, void *session_private)
443 {
444         struct qat_pmd_private *internals = dev->data->dev_private;
445
446         struct qat_session *session = session_private;
447
448         int qat_cmd_id;
449
450         PMD_INIT_FUNC_TRACE();
451
452         /* Get requested QAT command id */
453         qat_cmd_id = qat_get_cmd_id(xform);
454         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
455                 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
456                 goto error_out;
457         }
458         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
459         switch (session->qat_cmd) {
460         case ICP_QAT_FW_LA_CMD_CIPHER:
461         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
462                 break;
463         case ICP_QAT_FW_LA_CMD_AUTH:
464         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
465                 break;
466         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
467         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
468         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
469                 break;
470         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
471         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
472         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
473                 break;
474         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
475         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
476         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
477         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
478         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
479         case ICP_QAT_FW_LA_CMD_MGF1:
480         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
481         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
482         case ICP_QAT_FW_LA_CMD_DELIMITER:
483         PMD_DRV_LOG(ERR, "Unsupported Service %u",
484                 session->qat_cmd);
485                 goto error_out;
486         default:
487         PMD_DRV_LOG(ERR, "Unsupported Service %u",
488                 session->qat_cmd);
489                 goto error_out;
490         }
491         return session;
492
493 error_out:
494         rte_mempool_put(internals->sess_mp, session);
495         return NULL;
496 }
497
498 struct qat_session *
499 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
500                                 struct rte_crypto_sym_xform *xform,
501                                 struct qat_session *session_private)
502 {
503
504         struct qat_pmd_private *internals = dev->data->dev_private;
505         struct qat_session *session = session_private;
506         struct rte_crypto_auth_xform *auth_xform = NULL;
507         struct rte_crypto_cipher_xform *cipher_xform = NULL;
508         auth_xform = qat_get_auth_xform(xform);
509
510         switch (auth_xform->algo) {
511         case RTE_CRYPTO_AUTH_SHA1_HMAC:
512                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
513                 break;
514         case RTE_CRYPTO_AUTH_SHA256_HMAC:
515                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
516                 break;
517         case RTE_CRYPTO_AUTH_SHA512_HMAC:
518                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
519                 break;
520         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
521                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
522                 break;
523         case RTE_CRYPTO_AUTH_AES_GCM:
524                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
525                 break;
526         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
527                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
528                 break;
529         case RTE_CRYPTO_AUTH_NULL:
530         case RTE_CRYPTO_AUTH_SHA1:
531         case RTE_CRYPTO_AUTH_SHA256:
532         case RTE_CRYPTO_AUTH_SHA512:
533         case RTE_CRYPTO_AUTH_SHA224:
534         case RTE_CRYPTO_AUTH_SHA224_HMAC:
535         case RTE_CRYPTO_AUTH_SHA384:
536         case RTE_CRYPTO_AUTH_SHA384_HMAC:
537         case RTE_CRYPTO_AUTH_MD5:
538         case RTE_CRYPTO_AUTH_MD5_HMAC:
539         case RTE_CRYPTO_AUTH_AES_CCM:
540         case RTE_CRYPTO_AUTH_AES_GMAC:
541         case RTE_CRYPTO_AUTH_KASUMI_F9:
542         case RTE_CRYPTO_AUTH_AES_CMAC:
543         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
544         case RTE_CRYPTO_AUTH_ZUC_EIA3:
545                 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
546                                 auth_xform->algo);
547                 goto error_out;
548         default:
549                 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
550                                 auth_xform->algo);
551                 goto error_out;
552         }
553         cipher_xform = qat_get_cipher_xform(xform);
554
555         if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
556                         (session->qat_hash_alg ==
557                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_64))  {
558                 if (qat_alg_aead_session_create_content_desc_auth(session,
559                                 cipher_xform->key.data,
560                                 cipher_xform->key.length,
561                                 auth_xform->add_auth_data_length,
562                                 auth_xform->digest_length,
563                                 auth_xform->op))
564                         goto error_out;
565         } else {
566                 if (qat_alg_aead_session_create_content_desc_auth(session,
567                                 auth_xform->key.data,
568                                 auth_xform->key.length,
569                                 auth_xform->add_auth_data_length,
570                                 auth_xform->digest_length,
571                                 auth_xform->op))
572                         goto error_out;
573         }
574         return session;
575
576 error_out:
577         rte_mempool_put(internals->sess_mp, session);
578         return NULL;
579 }
580
581 unsigned qat_crypto_sym_get_session_private_size(
582                 struct rte_cryptodev *dev __rte_unused)
583 {
584         return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
585 }
586
587
588 uint16_t
589 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
590                 uint16_t nb_ops)
591 {
592         register struct qat_queue *queue;
593         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
594         register uint32_t nb_ops_sent = 0;
595         register struct rte_crypto_op **cur_op = ops;
596         register int ret;
597         uint16_t nb_ops_possible = nb_ops;
598         register uint8_t *base_addr;
599         register uint32_t tail;
600         int overflow;
601
602         if (unlikely(nb_ops == 0))
603                 return 0;
604
605         /* read params used a lot in main loop into registers */
606         queue = &(tmp_qp->tx_q);
607         base_addr = (uint8_t *)queue->base_addr;
608         tail = queue->tail;
609
610         /* Find how many can actually fit on the ring */
611         overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
612                                 - queue->max_inflights;
613         if (overflow > 0) {
614                 rte_atomic16_sub(&tmp_qp->inflights16, overflow);
615                 nb_ops_possible = nb_ops - overflow;
616                 if (nb_ops_possible == 0)
617                         return 0;
618         }
619
620         while (nb_ops_sent != nb_ops_possible) {
621                 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
622                 if (ret != 0) {
623                         tmp_qp->stats.enqueue_err_count++;
624                         if (nb_ops_sent == 0)
625                                 return 0;
626                         goto kick_tail;
627                 }
628
629                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
630                 nb_ops_sent++;
631                 cur_op++;
632         }
633 kick_tail:
634         WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
635                         queue->hw_queue_number, tail);
636         queue->tail = tail;
637         tmp_qp->stats.enqueued_count += nb_ops_sent;
638         return nb_ops_sent;
639 }
640
/*
 * Dequeue up to nb_ops completed crypto ops from the QAT RX ring.
 *
 * Walks response descriptors until an empty-signature slot or nb_ops is
 * reached.  Each consumed slot is re-stamped with ADF_RING_EMPTY_SIG so
 * the next poll stops correctly.  The head CSR is written once at the
 * end, and the inflight counter is decremented by the number dequeued.
 * Returns the number of ops placed into the ops[] array.
 */
uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	/* An empty-signature first word marks an unused ring slot */
	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		/* opaque_data carries the originating op pointer set at
		 * enqueue time */
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
				sizeof(struct icp_qat_fw_comn_resp));
#endif
		/* Translate the firmware status flag into an op status */
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		/* Re-mark the slot as empty before advancing the head */
		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
					((uint8_t *)queue->base_addr +
							queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		/* Publish the new head to hardware once per burst */
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}
691
/*
 * Translate one symmetric crypto operation into a QAT firmware bulk
 * request descriptor written at @out_msg.
 *
 * The descriptor template is taken from the session (ctx->fw_req) and
 * then patched with the per-op fields: source/destination buffer
 * addresses and lengths, cipher and auth regions, IV, digest and AAD
 * pointers.
 *
 * Returns 0 on success, -EINVAL if the op is sessionless, belongs to a
 * different device type, or carries bit-lengths QAT cannot handle.
 *
 * NOTE(review): only contiguous (single-segment) mbufs are handled here —
 * rte_pktmbuf_mtophys()/data_len() address only the first segment.
 */
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	/* Debug-only sanity check: this PMD services symmetric ops only. */
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	/* Sessionless ops carry no prebuilt fw_req template - reject. */
	if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	/* Reject sessions created for a different cryptodev type. */
	if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	ctx = (struct qat_session *)op->sym->session->_private;
	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	/* Start from the session's pre-built request template. */
	*qat_req = ctx->fw_req;
	/* Stash the op pointer so the dequeue path can recover it. */
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	/* Default to in-place operation: dst == src. */
	qat_req->comn_mid.dst_length =
		qat_req->comn_mid.src_length =
				rte_pktmbuf_data_len(op->sym->m_src);

	qat_req->comn_mid.dest_data_addr =
		qat_req->comn_mid.src_data_addr =
			    rte_pktmbuf_mtophys(op->sym->m_src);

	/* Out-of-place: a separate destination mbuf was supplied. */
	if (unlikely(op->sym->m_dst != NULL)) {
		qat_req->comn_mid.dest_data_addr =
				rte_pktmbuf_mtophys(op->sym->m_dst);
		qat_req->comn_mid.dst_length =
				rte_pktmbuf_data_len(op->sym->m_dst);
	}

	/* Auth params immediately follow the cipher params in the
	 * service-specific region of the request.
	 */
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	cipher_param->cipher_length = op->sym->cipher.data.length;
	cipher_param->cipher_offset = op->sym->cipher.data.offset;
	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		/* SNOW 3G lengths/offsets arrive in bits; QAT wants bytes
		 * and only byte-aligned values are supported here.
		 */
		if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
				(cipher_param->cipher_offset
					% BYTE_LENGTH != 0))) {
			PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
				"supports byte aligned values");
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		/* Convert bit counts to byte counts. */
		cipher_param->cipher_length >>= 3;
		cipher_param->cipher_offset >>= 3;
	}

	/* IV small enough to ride inline in the descriptor? Otherwise
	 * pass it by physical address and flag the 64-bit pointer form.
	 */
	if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
			sizeof(cipher_param->u.cipher_IV_array))) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				op->sym->cipher.iv.data,
				op->sym->cipher.iv.length);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
	}
	/* Digest delivered via its own buffer rather than appended to
	 * the payload (hence the NO_DIGEST_IN_BUFFER flag).
	 */
	if (op->sym->auth.digest.phys_addr) {
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
	}
	auth_param->auth_off = op->sym->auth.data.offset;
	auth_param->auth_len = op->sym->auth.data.length;
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		/* Same bit-to-byte conversion as the cipher path above. */
		if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
				(auth_param->auth_len % BYTE_LENGTH != 0))) {
			PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
				"supports byte aligned values");
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		auth_param->auth_off >>= 3;
		auth_param->auth_len >>= 3;
	}
	auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
	/* (GCM) aad length(240 max) will be at this location after precompute */
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		struct icp_qat_hw_auth_algo_blk *hash;

		/* Locate the auth block inside the content descriptor;
		 * for HASH_CIPHER the auth block comes first, otherwise
		 * it follows the cipher block.
		 */
		if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)
			hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd);
		else
			hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd +
				sizeof(struct icp_qat_hw_cipher_algo_blk));

		/* AAD size was precomputed into state1; round up to a
		 * 16-byte multiple as the firmware requires.
		 */
		auth_param->u2.aad_sz = ALIGN_POW2_ROUNDUP(hash->sha.state1[
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ + 3], 16);
		if (op->sym->cipher.iv.length == 12) {
			/*
			 * For GCM a 12 byte (96-bit) IV is allowed,
			 * but we need to inform the f/w
			 */
			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		}
	}
	/* hash_state_sz is expressed in 8-byte (quadword) units. */
	auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;


#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	/* Dump the finished descriptor and its inputs for debugging. */
	rte_hexdump(stdout, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	rte_hexdump(stdout, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
			rte_pktmbuf_data_len(op->sym->m_src));
	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
			op->sym->cipher.iv.length);
	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
			op->sym->auth.digest.length);
	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
			op->sym->auth.aad.length);
#endif
	return 0;
}
830
/*
 * Return data modulo 2^shift, i.e. the low 'shift' bits of data.
 * Used for power-of-two ring-size wrap-around without a division.
 */
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t rounded_down = (data >> shift) << shift;

	return data - rounded_down;
}
838
839 void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
840 {
841         struct rte_cryptodev_sym_session *sess = sym_sess;
842         struct qat_session *s = (void *)sess->_private;
843
844         PMD_INIT_FUNC_TRACE();
845         s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
846                 offsetof(struct qat_session, cd) +
847                 offsetof(struct rte_cryptodev_sym_session, _private);
848 }
849
/*
 * Device configure callback. Reconfiguration is not supported by this
 * PMD, so always report -ENOTSUP.
 */
int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return -ENOTSUP;
}
855
/*
 * Device start callback. No hardware action is needed to start the
 * device, so this is a trace-only no-op that reports success.
 */
int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
861
/*
 * Device stop callback. Nothing to tear down here; queue pairs are
 * released separately via qat_dev_close().
 */
void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
}
866
867 int qat_dev_close(struct rte_cryptodev *dev)
868 {
869         int i, ret;
870
871         PMD_INIT_FUNC_TRACE();
872
873         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
874                 ret = qat_crypto_sym_qp_release(dev, i);
875                 if (ret < 0)
876                         return ret;
877         }
878
879         return 0;
880 }
881
882 void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
883                                 struct rte_cryptodev_info *info)
884 {
885         struct qat_pmd_private *internals = dev->data->dev_private;
886
887         PMD_INIT_FUNC_TRACE();
888         if (info != NULL) {
889                 info->max_nb_queue_pairs =
890                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
891                                 ADF_NUM_BUNDLES_PER_DEV;
892                 info->feature_flags = dev->feature_flags;
893                 info->capabilities = qat_pmd_capabilities;
894                 info->sym.max_nb_sessions = internals->max_nb_sessions;
895                 info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
896         }
897 }
898
899 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
900                 struct rte_cryptodev_stats *stats)
901 {
902         int i;
903         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
904
905         PMD_INIT_FUNC_TRACE();
906         if (stats == NULL) {
907                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
908                 return;
909         }
910         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
911                 if (qp[i] == NULL) {
912                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
913                         continue;
914                 }
915
916                 stats->enqueued_count += qp[i]->stats.enqueued_count;
917                 stats->dequeued_count += qp[i]->stats.enqueued_count;
918                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
919                 stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
920         }
921 }
922
923 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
924 {
925         int i;
926         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
927
928         PMD_INIT_FUNC_TRACE();
929         for (i = 0; i < dev->data->nb_queue_pairs; i++)
930                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
931         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
932 }