cryptodev: add capabilities discovery
[dpdk.git] drivers/crypto/qat/qat_crypto.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *       * Redistributions of source code must retain the above copyright
 *         notice, this list of conditions and the following disclaimer.
 *       * Redistributions in binary form must reproduce the above copyright
 *         notice, this list of conditions and the following disclaimer in
 *         the documentation and/or other materials provided with the
 *         distribution.
 *       * Neither the name of Intel Corporation nor the names of its
 *         contributors may be used to endorse or promote products derived
 *         from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <string.h>
38 #include <inttypes.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_tailq.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_launch.h>
52 #include <rte_eal.h>
53 #include <rte_per_lcore.h>
54 #include <rte_lcore.h>
55 #include <rte_atomic.h>
56 #include <rte_branch_prediction.h>
57 #include <rte_ring.h>
58 #include <rte_mempool.h>
59 #include <rte_mbuf.h>
60 #include <rte_string_fns.h>
61 #include <rte_spinlock.h>
62 #include <rte_hexdump.h>
63
64 #include "qat_logs.h"
65 #include "qat_algs.h"
66 #include "qat_crypto.h"
67 #include "adf_transport_access_macros.h"
68
69 #define BYTE_LENGTH    8
70
static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 20,
					.max = 20,
					.increment = 0
				},
				.aad_size = { 0 }
			}
		}
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 32,
					.max = 32,
					.increment = 0
				},
				.aad_size = { 0 }
			}
		}
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 128,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.aad_size = { 0 }
			}
		}
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = { 0 }
			}
		}
	},
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				}
			}
		}
	},
	{	/* SNOW3G (UIA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.auth = {
				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 4,
					.increment = 0
				},
				.aad_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}
		}
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}
		}
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}
		}
	},
	{	/* SNOW3G (UEA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			.cipher = {
				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}
		}
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
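
/*
 * Illustrative sketch (not driver code): how an application could discover
 * the algorithms advertised in qat_pmd_capabilities above. The table is
 * exposed through rte_cryptodev_info_get() and terminated by an entry whose
 * .op is RTE_CRYPTO_OP_TYPE_UNDEFINED (set by
 * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()). The guard macro and function
 * name are hypothetical; the sketch is left out of the build.
 */
#ifdef QAT_PMD_CAPABILITY_EXAMPLE	/* hypothetical guard, never defined */
static int
qat_example_device_supports_auth(uint8_t dev_id,
		enum rte_crypto_auth_algorithm algo)
{
	struct rte_cryptodev_info info;
	const struct rte_cryptodev_capabilities *cap;

	rte_cryptodev_info_get(dev_id, &info);

	/* Walk the capability table until the end-of-list marker */
	for (cap = info.capabilities;
			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH &&
				cap->sym.auth.algo == algo)
			return 1;	/* algorithm is advertised */
	}
	return 0;
}
#endif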

static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);

void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		void *session)
{
	struct qat_session *sess = session;

	PMD_INIT_FUNC_TRACE();
	if (sess != NULL) {
		/* Zero the private area but preserve cd_paddr, which is
		 * set once at mempool object initialisation time.
		 */
		phys_addr_t cd_paddr = sess->cd_paddr;

		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
		sess->cd_paddr = cd_paddr;
	}
}

static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}
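
/*
 * Illustrative sketch (not driver code): building the xform chain that
 * qat_get_cmd_id() above maps to ICP_QAT_FW_LA_CMD_CIPHER_HASH, i.e.
 * encrypt then authenticate. Key data is omitted for brevity. The guard
 * macro and function name are hypothetical; the sketch is left out of
 * the build.
 */
#ifdef QAT_PMD_XFORM_EXAMPLE	/* hypothetical guard, never defined */
static void
qat_example_build_cipher_hash_chain(struct rte_crypto_sym_xform *cipher,
		struct rte_crypto_sym_xform *auth)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	/* First xform: AES-CBC encryption */
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->next = auth;

	/* Second xform: SHA1-HMAC digest generation */
	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->next = NULL;
}
#endif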

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

void *
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_GCM:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
				cipher_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length))
		goto error_out;

	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	int qat_cmd_id;

	PMD_INIT_FUNC_TRACE();

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		goto error_out;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		/* The cipher step releases the session on failure */
		if (session == NULL)
			return NULL;
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		/* The auth step releases the session on failure */
		if (session == NULL)
			return NULL;
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	}
	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

struct qat_session *
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	auth_xform = qat_get_auth_xform(xform);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		goto error_out;
	}
	cipher_xform = qat_get_cipher_xform(xform);

	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
			(session->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		/* For GCM the hash state is derived from the cipher key */
		if (qat_alg_aead_session_create_content_desc_auth(session,
				cipher_xform->key.data,
				cipher_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length))
			goto error_out;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				auth_xform->key.data,
				auth_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length))
			goto error_out;
	}
	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}
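
/*
 * Illustrative sketch (not driver code): once an xform chain is built, an
 * application hands it to the cryptodev layer, which dispatches to the
 * configure_session entry points above for a QAT device. The guard macro
 * and function name are hypothetical; the sketch is left out of the build.
 */
#ifdef QAT_PMD_SESSION_EXAMPLE	/* hypothetical guard, never defined */
static struct rte_cryptodev_sym_session *
qat_example_create_session(uint8_t dev_id,
		struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev_sym_session *sess;

	/* Ends up in qat_crypto_sym_configure_session() for a QAT device */
	sess = rte_cryptodev_sym_session_create(dev_id, xform);
	if (sess == NULL)
		PMD_DRV_LOG(ERR, "Session creation failed");
	return sess;
}
#endif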

unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}

uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
				- queue->max_inflights;
	if (overflow > 0) {
		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/* Give back the ring credits reserved for the ops
			 * that will not be sent.
			 */
			rte_atomic16_sub(&tmp_qp->inflights16,
					nb_ops_possible - nb_ops_sent);
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, tail);
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	return nb_ops_sent;
}

uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
				sizeof(struct icp_qat_fw_comn_resp));
#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
					((uint8_t *)queue->base_addr +
							queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}
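
/*
 * Illustrative sketch (not driver code): the burst functions above are
 * reached through the generic enqueue/dequeue API. A minimal submit-and-reap
 * loop, assuming the ops were prepared with a valid QAT session; the guard
 * macro and function name are hypothetical, and the sketch is left out of
 * the build.
 */
#ifdef QAT_PMD_BURST_EXAMPLE	/* hypothetical guard, never defined */
static void
qat_example_process_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t nb_tx = 0, nb_rx = 0;

	/* Retry until the ring has accepted the whole burst */
	while (nb_tx < nb_ops)
		nb_tx += rte_cryptodev_enqueue_burst(dev_id, qp_id,
				ops + nb_tx, nb_ops - nb_tx);

	/* Reap completions back into the same array */
	while (nb_rx < nb_ops)
		nb_rx += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				ops + nb_rx, nb_ops - nb_rx);
}
#endif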

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	ctx = (struct qat_session *)op->sym->session->_private;
	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	*qat_req = ctx->fw_req;
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	/*
	 * The following code assumes:
	 * - single entry buffer.
	 * - always in place.
	 */
	qat_req->comn_mid.dst_length =
			qat_req->comn_mid.src_length =
					rte_pktmbuf_data_len(op->sym->m_src);
	qat_req->comn_mid.dest_data_addr =
			qat_req->comn_mid.src_data_addr =
					rte_pktmbuf_mtophys(op->sym->m_src);
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	cipher_param->cipher_length = op->sym->cipher.data.length;
	cipher_param->cipher_offset = op->sym->cipher.data.offset;
	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
				(cipher_param->cipher_offset
					% BYTE_LENGTH != 0))) {
			PMD_DRV_LOG(ERR, "For SNOW3G, QAT PMD only "
				"supports byte aligned values");
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		/* SNOW3G lengths/offsets arrive in bits; convert to bytes */
		cipher_param->cipher_length >>= 3;
		cipher_param->cipher_offset >>= 3;
	}

	if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
			sizeof(cipher_param->u.cipher_IV_array))) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				op->sym->cipher.iv.data,
				op->sym->cipher.iv.length);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
	}
	if (op->sym->auth.digest.phys_addr) {
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
	}
	auth_param->auth_off = op->sym->auth.data.offset;
	auth_param->auth_len = op->sym->auth.data.length;
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
				(auth_param->auth_len % BYTE_LENGTH != 0))) {
			PMD_DRV_LOG(ERR, "For SNOW3G, QAT PMD only "
				"supports byte aligned values");
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		auth_param->auth_off >>= 3;
		auth_param->auth_len >>= 3;
	}
	auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
	/* (GCM) AAD length (240 max) will be at this location after
	 * precompute.
	 */
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		struct icp_qat_hw_auth_algo_blk *hash;

		if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)
			hash = (struct icp_qat_hw_auth_algo_blk *)
					((char *)&ctx->cd);
		else
			hash = (struct icp_qat_hw_auth_algo_blk *)
					((char *)&ctx->cd +
					sizeof(struct icp_qat_hw_cipher_algo_blk));

		auth_param->u2.aad_sz = ALIGN_POW2_ROUNDUP(hash->sha.state1[
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ + 3], 16);
		if (op->sym->cipher.iv.length == 12) {
			/*
			 * For GCM a 12 byte IV is allowed,
			 * but we need to inform the f/w
			 */
			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		}
	}
	auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	rte_hexdump(stdout, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	rte_hexdump(stdout, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t *),
			rte_pktmbuf_data_len(op->sym->m_src));
	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
			op->sym->cipher.iv.length);
	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
			op->sym->auth.digest.length);
	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
			op->sym->auth.aad.length);
#endif
	return 0;
}

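/*
 * Cheap modulo for power-of-two ring sizes: with shift = log2(ring size),
 * data - ((data >> shift) << shift) equals data & ((1u << shift) - 1),
 * i.e. data modulo 2^shift, without a divide instruction.
 */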
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t div = data >> shift;
	uint32_t mult = div << shift;

	return data - mult;
}

void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess)
{
	struct qat_session *s = priv_sess;

	PMD_INIT_FUNC_TRACE();
	s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd);
}

int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return -ENOTSUP;
}

int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
}

int qat_dev_close(struct rte_cryptodev *dev)
{
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_crypto_sym_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

void qat_dev_info_get(struct rte_cryptodev *dev,
				struct rte_cryptodev_info *info)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs =
				ADF_NUM_SYM_QPS_PER_BUNDLE *
				ADF_NUM_BUNDLES_PER_DEV;
		info->feature_flags = dev->feature_flags;
		info->capabilities = qat_pmd_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
	}
}

void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->stats.enqueued_count;
		stats->dequeued_count += qp[i]->stats.dequeued_count;
		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
	}
}

void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] != NULL)
			memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
	}
	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
}