[dpdk.git] drivers/crypto/qat/qat_crypto.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *       * Redistributions of source code must retain the above copyright
 *         notice, this list of conditions and the following disclaimer.
 *       * Redistributions in binary form must reproduce the above copyright
 *         notice, this list of conditions and the following disclaimer in
 *         the documentation and/or other materials provided with the
 *         distribution.
 *       * Neither the name of Intel Corporation nor the names of its
 *         contributors may be used to endorse or promote products derived
 *         from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>

#include "qat_logs.h"
#include "qat_algs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define BYTE_LENGTH    8

static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 20,
					.max = 20,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 32,
					.max = 32,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 128,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* SNOW3G (UIA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 4,
					.increment = 0
				},
				.aad_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SNOW3G (UEA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
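
/*
 * Illustrative usage note (not part of the driver): an application can
 * retrieve this capability table at runtime through the generic cryptodev
 * API, e.g.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	// info.capabilities now points at qat_pmd_capabilities (see
 *	// qat_dev_info_get() below); dev_id is assumed to identify a
 *	// configured QAT device.
 */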

static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);

void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		void *session)
{
	struct qat_session *sess = session;
	phys_addr_t cd_paddr;

	PMD_INIT_FUNC_TRACE();
	if (session) {
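		/*
		 * Preserve the content descriptor physical address across
		 * the memset so the zeroed session object can be
		 * reinitialised and reused from the mempool.
		 */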
		cd_paddr = sess->cd_paddr;
		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
		sess->cd_paddr = cd_paddr;
	} else
		PMD_DRV_LOG(ERR, "NULL session");
}

static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}
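
/*
 * Illustrative example (assumed application code, not part of this file):
 * chaining a CIPHER xform to an AUTH xform selects the combined
 * CIPHER_HASH firmware command:
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *	};
 *
 *	// qat_get_cmd_id(&cipher_xf) == ICP_QAT_FW_LA_CMD_CIPHER_HASH
 */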

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

void *
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_GCM:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
				cipher_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length))
		goto error_out;

	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	int qat_cmd_id;

	PMD_INIT_FUNC_TRACE();

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		goto error_out;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	}
	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

struct qat_session *
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	auth_xform = qat_get_auth_xform(xform);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		goto error_out;
	}
	cipher_xform = qat_get_cipher_xform(xform);

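	/*
	 * For Galois (GCM) hashing the firmware derives the GHASH key from
	 * the cipher key, so the key material is taken from the cipher
	 * xform in the same chain rather than from the auth xform.
	 */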
	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
			(session->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				cipher_xform->key.data,
				cipher_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length))
			goto error_out;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				auth_xform->key.data,
				auth_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length))
			goto error_out;
	}
	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}

uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
				- queue->max_inflights;
	if (overflow > 0) {
		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, tail);
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	return nb_ops_sent;
}

uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
				sizeof(struct icp_qat_fw_comn_resp));
#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
					((uint8_t *)queue->base_addr +
							queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}
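
/*
 * Illustrative polling-loop sketch (assumed application code): these two
 * burst functions are normally reached through the generic cryptodev API
 * rather than called directly, e.g.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *			ops, nb_ops);
 *	uint16_t recvd;
 *
 *	do {
 *		recvd = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				ops_out, sent);
 *	} while (recvd == 0);
 *
 * dev_id, qp_id, ops and ops_out are assumed to have been set up by the
 * application beforehand.
 */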

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session-oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	ctx = (struct qat_session *)op->sym->session->_private;
	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	*qat_req = ctx->fw_req;
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	qat_req->comn_mid.dst_length =
		qat_req->comn_mid.src_length =
				rte_pktmbuf_data_len(op->sym->m_src);

	qat_req->comn_mid.dest_data_addr =
		qat_req->comn_mid.src_data_addr =
				rte_pktmbuf_mtophys(op->sym->m_src);

	if (unlikely(op->sym->m_dst != NULL)) {
		qat_req->comn_mid.dest_data_addr =
				rte_pktmbuf_mtophys(op->sym->m_dst);
		qat_req->comn_mid.dst_length =
				rte_pktmbuf_data_len(op->sym->m_dst);
	}

	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	cipher_param->cipher_length = op->sym->cipher.data.length;
	cipher_param->cipher_offset = op->sym->cipher.data.offset;
	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
				(cipher_param->cipher_offset
					% BYTE_LENGTH != 0))) {
			PMD_DRV_LOG(ERR, "For SNOW 3G, QAT PMD only "
				"supports byte aligned values");
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
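		/* SNOW 3G lengths/offsets are given in bits; convert to
		 * bytes for the firmware request.
		 */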
		cipher_param->cipher_length >>= 3;
		cipher_param->cipher_offset >>= 3;
	}

	if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
			sizeof(cipher_param->u.cipher_IV_array))) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				op->sym->cipher.iv.data,
				op->sym->cipher.iv.length);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
	}
	if (op->sym->auth.digest.phys_addr) {
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
	}
	auth_param->auth_off = op->sym->auth.data.offset;
	auth_param->auth_len = op->sym->auth.data.length;
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
				(auth_param->auth_len % BYTE_LENGTH != 0))) {
			PMD_DRV_LOG(ERR, "For SNOW 3G, QAT PMD only "
				"supports byte aligned values");
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
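		/* As above, SNOW 3G auth offset/length arrive in bits. */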
		auth_param->auth_off >>= 3;
		auth_param->auth_len >>= 3;
	}
	auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
	/* (GCM) AAD length (240 max) will be at this location after precompute */
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		struct icp_qat_hw_auth_algo_blk *hash;

		if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)
			hash = (struct icp_qat_hw_auth_algo_blk *)
					((char *)&ctx->cd);
		else
			hash = (struct icp_qat_hw_auth_algo_blk *)
					((char *)&ctx->cd +
					sizeof(struct icp_qat_hw_cipher_algo_blk));

		auth_param->u2.aad_sz = ALIGN_POW2_ROUNDUP(hash->sha.state1[
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ + 3], 16);
		if (op->sym->cipher.iv.length == 12) {
			/*
			 * For GCM a 12 byte IV is allowed,
			 * but we need to inform the f/w
			 */
			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		}
	}
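	/*
	 * aad_sz is in bytes; the firmware hash_state_sz field is assumed
	 * to be expressed in 8-byte quadwords, hence the shift by 3.
	 */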
	auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	rte_hexdump(stdout, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	rte_hexdump(stdout, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t *),
			rte_pktmbuf_data_len(op->sym->m_src));
	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
			op->sym->cipher.iv.length);
	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
			op->sym->auth.digest.length);
	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
			op->sym->auth.aad.length);
#endif
	return 0;
}

static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t div = data >> shift;
	uint32_t mult = div << shift;

	return data - mult;
}
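
/*
 * Note: for the power-of-two ring sizes used here this is equivalent to
 * data & ((1 << shift) - 1); e.g. adf_modulo(100, 5) == 100 - 96 == 4,
 * the same as 100 & 31, without a divide instruction.
 */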

void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess)
{
	struct qat_session *s = priv_sess;

	PMD_INIT_FUNC_TRACE();
	s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd);
}

int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return -ENOTSUP;
}

int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
}

int qat_dev_close(struct rte_cryptodev *dev)
{
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_crypto_sym_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

void qat_dev_info_get(struct rte_cryptodev *dev,
				struct rte_cryptodev_info *info)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs =
				ADF_NUM_SYM_QPS_PER_BUNDLE *
				ADF_NUM_BUNDLES_PER_DEV;
		info->feature_flags = dev->feature_flags;
		info->capabilities = qat_pmd_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
	}
}

void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->stats.enqueued_count;
		stats->dequeued_count += qp[i]->stats.dequeued_count;
		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
	}
}

void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_queue_pairs; i++)
		memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
}