 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>

#include "qat_logs.h"
#include "qat_algs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define BYTE_LENGTH	8

static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
		struct qat_pmd_private *internals) {
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;
		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;
		if (capability->sym.cipher.algo == algo)
			return 1;
	}
	return 0;
}

static int
qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
		struct qat_pmd_private *internals) {
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;
		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
			continue;
		if (capability->sym.auth.algo == algo)
			return 1;
	}
	return 0;
}

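/*
 * Background for the BPI (DOCSIS Baseline Privacy) helpers below:
 * for a single partial block, CFB mode reduces to
 *     ciphertext = plaintext  XOR E_key(IV)   (encrypt)
 *     plaintext  = ciphertext XOR E_key(IV)   (decrypt)
 * i.e. only the block cipher's *encrypt* primitive is ever needed, which is
 * why a libcrypto ECB context is used to encrypt the IV and the result is
 * XORed with the data in both directions.
 */
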
/** Encrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB encryption, same result, more performant
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;

	/* ECB method: encrypt the IV, then XOR this with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_encrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst+i) = *(src+i)^(encrypted_iv[i]);

	return 0;

cipher_encrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
	return -EINVAL;
}

/** Decrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB encryption, same result, more performant
 */
static inline int
bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;

	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_decrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst+i) = *(src+i)^(encrypted_iv[i]);

	return 0;

cipher_decrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
	return -EINVAL;
}

/** Creates a context in either AES or DES in ECB mode
 *  Depends on openssl libcrypto
 */
static void *
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
		enum rte_crypto_cipher_operation direction __rte_unused,
		uint8_t *key)
{
	const EVP_CIPHER *algo = NULL;
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

	if (ctx == NULL)
		goto ctx_init_err;

	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
		algo = EVP_des_ecb();
	else
		algo = EVP_aes_128_ecb();

	/* IV will be ECB encrypted whether direction is encrypt or decrypt*/
	if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
		goto ctx_init_err;

	return ctx;

ctx_init_err:
	if (ctx != NULL)
		EVP_CIPHER_CTX_free(ctx);
	return NULL;
}

/** Frees a context previously created
 *  Depends on openssl libcrypto
 */
static void
bpi_cipher_ctx_free(void *bpi_ctx)
{
	if (bpi_ctx != NULL)
		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
}

static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
		struct qat_crypto_op_cookie *qat_op_cookie);

void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		void *session)
{
	struct qat_session *sess = session;
	phys_addr_t cd_paddr;

	PMD_INIT_FUNC_TRACE();
	if (sess) {
		if (sess->bpi_ctx) {
			bpi_cipher_ctx_free(sess->bpi_ctx);
			sess->bpi_ctx = NULL;
		}
		cd_paddr = sess->cd_paddr;
		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
		sess->cd_paddr = cd_paddr;
	} else
		PMD_DRV_LOG(ERR, "NULL session");
}

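/** Map an rte_crypto xform chain onto the QAT firmware LA command it
 *  requires, e.g. a single AUTH xform maps to ICP_QAT_FW_LA_CMD_AUTH and a
 *  CIPHER xform chained to an AUTH xform maps to
 *  ICP_QAT_FW_LA_CMD_CIPHER_HASH. A negative value is returned for
 *  unsupported chains.
 */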
static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	/* AEAD */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
		else
			return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	}

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

struct qat_session *
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_session *session = session_private;
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		session->bpi_ctx = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data);
		if (session->bpi_ctx == NULL) {
			PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		session->bpi_ctx = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data);
		if (session->bpi_ctx == NULL) {
			PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
						[cipher_xform->algo]);
			goto error_out;
		}
		if (qat_alg_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length))
		goto error_out;

	return session;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return NULL;
}

void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_session *session = session_private;
	int qat_cmd_id;

	PMD_INIT_FUNC_TRACE();

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		goto error_out;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		session = qat_crypto_sym_configure_session_auth(dev, xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
			session = qat_crypto_sym_configure_session_aead(xform,
					session);
		else {
			session = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
			session = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
			session = qat_crypto_sym_configure_session_aead(xform,
					session);
		else {
			session = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
			session = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
		}
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	}

	return session;

error_out:
	return NULL;
}

struct qat_session *
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session_private)
{
	struct qat_session *session = session_private;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct qat_pmd_private *internals = dev->data->dev_private;
	auth_xform = qat_get_auth_xform(xform);
	uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_alg_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
						[auth_xform->algo]);
			goto error_out;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		goto error_out;
	}

	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */
			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				goto error_out;

			if (qat_alg_aead_session_create_content_desc_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				goto error_out;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */
			if (qat_alg_aead_session_create_content_desc_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				goto error_out;

			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				goto error_out;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			goto error_out;
	}

	session->digest_length = auth_xform->digest_length;
	return session;

error_out:
	return NULL;
}

struct qat_session *
qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
				struct qat_session *session_private)
{
	struct qat_session *session = session_private;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_alg_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
				aead_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		goto error_out;
	}

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * It needs to create cipher desc content first,
		 * then authentication
		 */
		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			goto error_out;

		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->add_auth_data_length,
					aead_xform->digest_length,
					RTE_CRYPTO_AUTH_OP_GENERATE))
			goto error_out;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * It needs to create authentication desc content first,
		 * then cipher
		 */
		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->add_auth_data_length,
					aead_xform->digest_length,
					RTE_CRYPTO_AUTH_OP_VERIFY))
			goto error_out;

		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			goto error_out;
	}

	session->digest_length = aead_xform->digest_length;
	return session;

error_out:
	return NULL;
}

unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}

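/** DOCSIS BPI pre-processing: when decrypting, any trailing partial block is
 *  decrypted here in software (single-block CFB via the libcrypto helpers
 *  above) before the request is sent, and the return value is the number of
 *  complete-block bytes the hardware should cipher.
 */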
static inline uint32_t
qat_bpicipher_preprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = sym_op->cipher.data.length % block_len;

	if (last_block_len &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {

		/* Decrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
					uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = last_block - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
				last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
					last_block_len);
#endif
		bpi_cipher_decrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
				last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
					last_block_len);
#endif
	}

	return sym_op->cipher.data.length - last_block_len;
}

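/** DOCSIS BPI post-processing: when encrypting, the hardware has ciphered
 *  only the complete blocks, so the trailing partial block is CFB-encrypted
 *  here on dequeue, using the previous ciphertext block (or the operation IV
 *  for a runt) as the CFB IV.
 */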
static inline uint32_t
qat_bpicipher_postprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = sym_op->cipher.data.length % block_len;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
					uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src before post-process:", last_block,
				last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before post-process:",
					dst, last_block_len);
#endif
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src after post-process:", last_block,
				last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after post-process:", dst,
					last_block_len);
#endif
	}
	return sym_op->cipher.data.length - last_block_len;
}

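/*
 * Enqueue a burst of ops on the TX ring. Ring space is reserved up front
 * with an atomic add on the queue pair's inflights16 counter; any overflow
 * beyond max_inflights is immediately given back, so only the ops that fit
 * are built into descriptors, and the ring tail CSR is written once for
 * the whole burst.
 */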
uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
				- queue->max_inflights;
	if (overflow > 0) {
		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
				tmp_qp->op_cookies[tail / queue->msg_size]);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/*
			 * This message cannot be enqueued,
			 * decrease number of ops that wasn't sent
			 */
			rte_atomic16_sub(&tmp_qp->inflights16,
					nb_ops_possible - nb_ops_sent);
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, tail);
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	return nb_ops_sent;
}

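/*
 * Dequeue a burst of completed responses from the RX ring. Each response's
 * opaque_data field carries the originating rte_crypto_op pointer; the
 * firmware status is mapped onto the op status, DOCSIS BPI sessions get
 * their trailing partial block post-processed, and the ring head CSR and
 * inflight counter are updated once per burst.
 */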
uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
				sizeof(struct icp_qat_fw_comn_resp));
#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			struct qat_session *sess = (struct qat_session *)
					(rx_op->sym->session->_private);
			if (sess->bpi_ctx)
				qat_bpicipher_postprocess(sess, rx_op);
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
				((uint8_t *)queue->base_addr +
						queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}

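/*
 * Build the QAT scatter-gather list for a (possibly chained) mbuf: the
 * first entry starts at buff_start, which may lie inside the first segment,
 * each further segment adds one entry, and the final entry is trimmed so
 * the list covers exactly data_len bytes. Fails if more than
 * QAT_SGL_MAX_NUMBER entries would be required.
 */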
static inline int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
		struct qat_alg_buf_list *list, uint32_t data_len)
{
	int nr = 1;

	uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
			buff_start + rte_pktmbuf_data_len(buf);

	list->bufers[0].addr = buff_start;
	list->bufers[0].resrvd = 0;
	list->bufers[0].len = buf_len;

	if (data_len <= buf_len) {
		list->num_bufs = nr;
		list->bufers[0].len = data_len;
		return 0;
	}

	buf = buf->next;
	while (buf) {
		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
			PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
					" entry(%u)",
					QAT_SGL_MAX_NUMBER);
			return -EINVAL;
		}

		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
		list->bufers[nr].resrvd = 0;
		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);

		buf_len += list->bufers[nr].len;
		buf = buf->next;

		if (buf_len > data_len) {
			list->bufers[nr].len -=
				buf_len - data_len;
			buf = NULL;
		}
		++nr;
	}
	list->num_bufs = nr;

	return 0;
}

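/*
 * The cipher IV is either copied directly into the 16-byte IV array of the
 * request descriptor (when it fits) or passed by physical address with the
 * 64-bit IV pointer flag set in the request header.
 */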
static void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
		struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_op *op,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				rte_crypto_op_ctod_offset(op, uint8_t *,
					iv_offset),
				iv_length);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr =
				rte_crypto_op_ctophys_offset(op,
					iv_offset);
	}
}

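/*
 * Translate one rte_crypto_op into a QAT LA bulk request: the session's
 * pre-built firmware descriptor (ctx->fw_req) is copied in as a template,
 * then the cipher/auth lengths and offsets, IVs, digest and AAD addresses,
 * and the source/destination DMA addresses (flat buffer or SGL) are filled
 * in for this particular operation.
 */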
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
		struct qat_crypto_op_cookie *qat_op_cookie)
{
	int ret = 0;
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;
	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	uint32_t min_ofs = 0;
	uint64_t src_buf_start = 0, dst_buf_start = 0;
	uint8_t do_sgl = 0;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	if (unlikely(op->sym->session->driver_id !=
			cryptodev_qat_driver_id)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	ctx = (struct qat_session *)op->sym->session->_private;
	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			do_aead = 1;
		} else {
			do_auth = 1;
			do_cipher = 1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		do_auth = 1;
		do_cipher = 0;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		do_auth = 0;
		do_cipher = 1;
	}

	if (do_cipher) {

		if (ctx->qat_cipher_alg ==
					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {

			if (unlikely(
				(cipher_param->cipher_length % BYTE_LENGTH != 0)
				 || (cipher_param->cipher_offset
						% BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			cipher_len = op->sym->cipher.data.length >> 3;
			cipher_ofs = op->sym->cipher.data.offset >> 3;

		} else if (ctx->bpi_ctx) {
			/* DOCSIS - only send complete blocks to device
			 * Process any partial block using CFB mode.
			 * Even if 0 complete blocks, still send this to device
			 * to get into rx queue for post-process and dequeuing
			 */
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}

		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
				cipher_param, op, qat_req);
		min_ofs = cipher_ofs;
	}

	if (do_auth) {

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
			ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
			if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
				|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			auth_ofs = op->sym->auth.data.offset >> 3;
			auth_len = op->sym->auth.data.length >> 3;

			if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
				if (do_cipher) {
					auth_len = auth_len + auth_ofs + 1 -
						ICP_QAT_HW_KASUMI_BLK_SZ;
					auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
				} else {
					auth_len = auth_len + auth_ofs + 1;
					auth_ofs = 0;
				}
			}
			auth_param->u1.aad_adr =
					rte_crypto_op_ctophys_offset(op,
							ctx->auth_iv.offset);

		} else if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			set_cipher_iv(ctx->auth_iv.length,
				ctx->auth_iv.offset,
				cipher_param, op, qat_req);
		} else {
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
		}
		min_ofs = auth_ofs;

		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
	}

	if (do_aead) {
		cipher_len = op->sym->aead.data.length;
		cipher_ofs = op->sym->aead.data.offset;
		auth_len = op->sym->aead.data.length;
		auth_ofs = op->sym->aead.data.offset;

		auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
				cipher_param, op, qat_req);
		min_ofs = op->sym->aead.data.offset;
	}

	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
		do_sgl = 1;

	/* adjust for chain case */
	if (do_cipher && do_auth)
		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;

	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
		min_ofs = 0;

	if (unlikely(op->sym->m_dst != NULL)) {
		/* Out-of-place operation (OOP)
		 * Don't align DMA start. DMA the minimum data-set
		 * so as not to overwrite data in dest buffer
		 */
		src_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
		dst_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);

	} else {
		/* In-place operation
		 * Start DMA at nearest aligned address below min_ofs
		 */
		src_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
						& QAT_64_BTYE_ALIGN_MASK;

		if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
					rte_pktmbuf_headroom(op->sym->m_src))
							> src_buf_start)) {
			/* alignment has pushed addr ahead of start of mbuf
			 * so revert and take the performance hit
			 */
			src_buf_start =
				rte_pktmbuf_mtophys_offset(op->sym->m_src,
								min_ofs);
		}
		dst_buf_start = src_buf_start;
	}

	if (do_cipher || do_aead) {
		cipher_param->cipher_offset =
				(uint32_t)rte_pktmbuf_mtophys_offset(
				op->sym->m_src, cipher_ofs) - src_buf_start;
		cipher_param->cipher_length = cipher_len;
	} else {
		cipher_param->cipher_offset = 0;
		cipher_param->cipher_length = 0;
	}

	if (do_auth || do_aead) {
		auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
				op->sym->m_src, auth_ofs) - src_buf_start;
		auth_param->auth_len = auth_len;
	} else {
		auth_param->auth_off = 0;
		auth_param->auth_len = 0;
	}

	qat_req->comn_mid.dst_length =
		qat_req->comn_mid.src_length =
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		> (auth_param->auth_off + auth_param->auth_len) ?
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		: (auth_param->auth_off + auth_param->auth_len);

	if (do_sgl) {

		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);
		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
				&qat_op_cookie->qat_sgl_list_src,
				qat_req->comn_mid.src_length);
		if (ret) {
			PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
			return ret;
		}

		if (likely(op->sym->m_dst == NULL))
			qat_req->comn_mid.dest_data_addr =
				qat_req->comn_mid.src_data_addr =
				qat_op_cookie->qat_sgl_src_phys_addr;
		else {
			ret = qat_sgl_fill_array(op->sym->m_dst,
					dst_buf_start,
					&qat_op_cookie->qat_sgl_list_dst,
					qat_req->comn_mid.dst_length);

			if (ret) {
				PMD_DRV_LOG(ERR, "QAT PMD Cannot "
						"fill sgl array");
				return ret;
			}

			qat_req->comn_mid.src_data_addr =
				qat_op_cookie->qat_sgl_src_phys_addr;
			qat_req->comn_mid.dest_data_addr =
					qat_op_cookie->qat_sgl_dst_phys_addr;
		}
	} else {
		qat_req->comn_mid.src_data_addr = src_buf_start;
		qat_req->comn_mid.dest_data_addr = dst_buf_start;
	}

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		if (ctx->cipher_iv.length == 12 ||
				ctx->auth_iv.length == 12) {
			/*
			 * For GCM a 12 byte IV is allowed,
			 * but we need to inform the f/w
			 */
			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		}
		if (!do_aead) {
			qat_req->comn_mid.dst_length =
				qat_req->comn_mid.src_length =
					rte_pktmbuf_data_len(op->sym->m_src);
			auth_param->u1.aad_adr = 0;
			auth_param->auth_len = op->sym->auth.data.length;
			auth_param->auth_off = op->sym->auth.data.offset;
			auth_param->u2.aad_sz = 0;
		}
	}

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	rte_hexdump(stdout, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	rte_hexdump(stdout, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
			rte_pktmbuf_data_len(op->sym->m_src));
	if (do_cipher) {
		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
						uint8_t *,
						ctx->cipher_iv.offset);
		rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
				ctx->cipher_iv.length);
	}

	if (do_auth) {
		if (ctx->auth_iv.length) {
			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
							uint8_t *,
							ctx->auth_iv.offset);
			rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
					ctx->auth_iv.length);
		}
		rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
				ctx->digest_length);
	}

	if (do_aead) {
		rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
				ctx->digest_length);
		rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
				ctx->aad_len);
	}
#endif
	return 0;
}

static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t div = data >> shift;
	uint32_t mult = div << shift;

	return data - mult;
}

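/*
 * adf_modulo(data, shift) computes data % (1u << shift), i.e.
 * data & ((1u << shift) - 1); e.g. adf_modulo(1032, 10) == 8. It is used
 * by the enqueue/dequeue paths above to wrap the ring head/tail offsets
 * around the ring size.
 */

/** Compute and cache the physical address of the content descriptor ('cd')
 *  embedded in the session private data, so request descriptors can
 *  reference it directly.
 */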
void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
{
	struct rte_cryptodev_sym_session *sess = sym_sess;
	struct qat_session *s = (void *)sess->_private;

	PMD_INIT_FUNC_TRACE();
	s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
		offsetof(struct qat_session, cd) +
		offsetof(struct rte_cryptodev_sym_session, _private);
}

int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
}

int qat_dev_close(struct rte_cryptodev *dev)
{
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_crypto_sym_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

void qat_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *info)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs =
				ADF_NUM_SYM_QPS_PER_BUNDLE *
				ADF_NUM_BUNDLES_PER_DEV;
		info->feature_flags = dev->feature_flags;
		info->capabilities = internals->qat_dev_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_qat_driver_id;
		info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	}
}

void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->stats.enqueued_count;
		stats->dequeued_count += qp[i]->stats.dequeued_count;
		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
	}
}

void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_queue_pairs; i++)
		memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
}