1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2017 Intel Corporation
11 #include <sys/queue.h>
14 #include <rte_common.h>
16 #include <rte_debug.h>
17 #include <rte_memory.h>
18 #include <rte_tailq.h>
19 #include <rte_malloc.h>
20 #include <rte_launch.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_mempool.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
29 #include <rte_hexdump.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_byteorder.h>
33 #include <rte_bus_pci.h>
35 #include <openssl/evp.h>
39 #include "qat_crypto.h"
40 #include "adf_transport_access_macros.h"
45 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
46 struct qat_pmd_private *internals) {
48 const struct rte_cryptodev_capabilities *capability;
50 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
51 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
52 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
55 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
58 if (capability->sym.cipher.algo == algo)
65 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
66 struct qat_pmd_private *internals) {
68 const struct rte_cryptodev_capabilities *capability;
70 while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
71 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
72 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
75 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
78 if (capability->sym.auth.algo == algo)
84 /** Encrypt a single partial block
85 * Depends on openssl libcrypto
86 * Uses ECB+XOR to perform CFB encryption: same result, better performance
89 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
90 uint8_t *iv, int ivlen, int srclen,
93 EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
95 uint8_t encrypted_iv[16];
98 /* ECB method: encrypt the IV, then XOR this with plaintext */
99 if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
101 goto cipher_encrypt_err;
103 for (i = 0; i < srclen; i++)
104 *(dst+i) = *(src+i)^(encrypted_iv[i]);
109 PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
113 /** Decrypt a single partial block
114 * Depends on openssl libcrypto
115 * Uses ECB+XOR to perform CFB decryption: same result, better performance
118 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
119 uint8_t *iv, int ivlen, int srclen,
122 EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
124 uint8_t encrypted_iv[16];
127 /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
128 if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
130 goto cipher_decrypt_err;
132 for (i = 0; i < srclen; i++)
133 *(dst+i) = *(src+i)^(encrypted_iv[i]);
138 PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
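/* Illustrative sketch (not part of the original driver): for a single
 * partial block, CFB reduces to ECB-encrypting the IV and XORing the
 * result with the data, which is exactly what the two helpers above do.
 * This standalone helper shows the same idea for a caller-provided
 * AES-128 key; the function name and buffers are hypothetical.
 */
static int __rte_unused
bpi_cfb_sketch(const uint8_t *key, const uint8_t *iv,
		const uint8_t *src, uint8_t *dst, int len)
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	uint8_t keystream[16];
	int out_len = sizeof(keystream);
	int i, ok = 0;

	if (ctx == NULL)
		return -1;
	/* ECB-encrypt the 16-byte IV to derive one block of keystream */
	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) == 1 &&
			EVP_EncryptUpdate(ctx, keystream, &out_len, iv, 16) == 1)
		ok = 1;
	/* XOR at most one block of data with the keystream */
	for (i = 0; ok && i < len && i < 16; i++)
		dst[i] = src[i] ^ keystream[i];
	EVP_CIPHER_CTX_free(ctx);
	return ok ? 0 : -1;
}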
142 /** Creates an ECB-mode cipher context for either AES or DES
143 * Depends on openssl libcrypto
146 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
147 enum rte_crypto_cipher_operation direction __rte_unused,
148 uint8_t *key, void **ctx)
150 const EVP_CIPHER *algo = NULL;
152 *ctx = EVP_CIPHER_CTX_new();
159 if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
160 algo = EVP_des_ecb();
162 algo = EVP_aes_128_ecb();
164 /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
165 if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
174 EVP_CIPHER_CTX_free(*ctx);
178 /** Frees a context previously created
179 * Depends on openssl libcrypto
182 bpi_cipher_ctx_free(void *bpi_ctx)
185 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
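/* Illustrative sketch (not part of the original driver): typical lifecycle
 * of the BPI helpers above for an AES DOCSIS BPI partial block. The key,
 * IV and data buffers are hypothetical placeholders.
 */
static int __rte_unused
bpi_helpers_lifecycle_sketch(void)
{
	uint8_t key[16] = {0};		/* AES-128 key (placeholder) */
	uint8_t iv[16] = {0};		/* block-sized IV (placeholder) */
	uint8_t plain[7] = {0};		/* runt block, shorter than 16B */
	uint8_t cipher[7];
	void *bpi_ctx = NULL;

	/* Create an ECB context keyed for AES DOCSIS BPI */
	if (bpi_cipher_ctx_init(RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
			RTE_CRYPTO_CIPHER_OP_ENCRYPT, key, &bpi_ctx) != 0)
		return -1;

	/* Encrypt the partial block: ECB(IV) XOR plaintext */
	bpi_cipher_encrypt(plain, cipher, iv, sizeof(iv),
			sizeof(plain), bpi_ctx);

	bpi_cipher_ctx_free(bpi_ctx);
	return 0;
}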
188 static inline uint32_t
189 adf_modulo(uint32_t data, uint32_t shift);
192 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
193 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
196 qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
197 struct rte_cryptodev_sym_session *sess)
199 PMD_INIT_FUNC_TRACE();
200 uint8_t index = dev->driver_id;
201 void *sess_priv = get_session_private_data(sess, index);
202 struct qat_session *s = (struct qat_session *)sess_priv;
206 bpi_cipher_ctx_free(s->bpi_ctx);
207 memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
208 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
209 set_session_private_data(sess, index, NULL);
210 rte_mempool_put(sess_mp, sess_priv);
215 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
218 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
219 return ICP_QAT_FW_LA_CMD_CIPHER;
221 /* Authentication Only */
222 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
223 return ICP_QAT_FW_LA_CMD_AUTH;
226 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
227 /* AES-GCM and AES-CCM work in different directions:
228 * GCM encrypts first and then generates the hash, whereas AES-CCM
229 * generates the hash first and then encrypts. A similar relation
230 * applies to decryption.
232 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
233 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
234 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
236 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
238 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
239 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
241 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
244 if (xform->next == NULL)
247 /* Cipher then Authenticate */
248 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
249 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
250 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
252 /* Authenticate then Cipher */
253 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
254 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
255 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
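/* Illustrative sketch (not part of the original driver): how a
 * cipher-then-auth xform chain from the application maps to a QAT firmware
 * command id through qat_get_cmd_id(). The xform contents are hypothetical
 * placeholders.
 */
static void __rte_unused
cmd_id_mapping_sketch(void)
{
	struct rte_crypto_sym_xform auth_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
	};
	struct rte_crypto_sym_xform cipher_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xf,
	};

	/* Cipher followed by auth is expected to select CIPHER_HASH */
	if (qat_get_cmd_id(&cipher_xf) != ICP_QAT_FW_LA_CMD_CIPHER_HASH)
		PMD_DRV_LOG(ERR, "unexpected command id");
}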
260 static struct rte_crypto_auth_xform *
261 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
264 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
273 static struct rte_crypto_cipher_xform *
274 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
277 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
278 return &xform->cipher;
287 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
288 struct rte_crypto_sym_xform *xform,
289 struct qat_session *session)
291 struct qat_pmd_private *internals = dev->data->dev_private;
292 struct rte_crypto_cipher_xform *cipher_xform = NULL;
295 /* Get cipher xform from crypto xform chain */
296 cipher_xform = qat_get_cipher_xform(xform);
298 session->cipher_iv.offset = cipher_xform->iv.offset;
299 session->cipher_iv.length = cipher_xform->iv.length;
301 switch (cipher_xform->algo) {
302 case RTE_CRYPTO_CIPHER_AES_CBC:
303 if (qat_alg_validate_aes_key(cipher_xform->key.length,
304 &session->qat_cipher_alg) != 0) {
305 PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
309 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
311 case RTE_CRYPTO_CIPHER_AES_CTR:
312 if (qat_alg_validate_aes_key(cipher_xform->key.length,
313 &session->qat_cipher_alg) != 0) {
314 PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
318 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
320 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
321 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
322 &session->qat_cipher_alg) != 0) {
323 PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
327 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
329 case RTE_CRYPTO_CIPHER_NULL:
330 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
332 case RTE_CRYPTO_CIPHER_KASUMI_F8:
333 if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
334 &session->qat_cipher_alg) != 0) {
335 PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
339 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
341 case RTE_CRYPTO_CIPHER_3DES_CBC:
342 if (qat_alg_validate_3des_key(cipher_xform->key.length,
343 &session->qat_cipher_alg) != 0) {
344 PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
348 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
350 case RTE_CRYPTO_CIPHER_DES_CBC:
351 if (qat_alg_validate_des_key(cipher_xform->key.length,
352 &session->qat_cipher_alg) != 0) {
353 PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
357 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
359 case RTE_CRYPTO_CIPHER_3DES_CTR:
360 if (qat_alg_validate_3des_key(cipher_xform->key.length,
361 &session->qat_cipher_alg) != 0) {
362 PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
366 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
368 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
369 ret = bpi_cipher_ctx_init(
372 cipher_xform->key.data,
375 PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
378 if (qat_alg_validate_des_key(cipher_xform->key.length,
379 &session->qat_cipher_alg) != 0) {
380 PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
384 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
386 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
387 ret = bpi_cipher_ctx_init(
390 cipher_xform->key.data,
393 PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
396 if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
397 &session->qat_cipher_alg) != 0) {
398 PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
402 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
404 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
405 if (!qat_is_cipher_alg_supported(
406 cipher_xform->algo, internals)) {
407 PMD_DRV_LOG(ERR, "%s not supported on this device",
408 rte_crypto_cipher_algorithm_strings
409 [cipher_xform->algo]);
413 if (qat_alg_validate_zuc_key(cipher_xform->key.length,
414 &session->qat_cipher_alg) != 0) {
415 PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
419 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
421 case RTE_CRYPTO_CIPHER_3DES_ECB:
422 case RTE_CRYPTO_CIPHER_AES_ECB:
423 case RTE_CRYPTO_CIPHER_AES_F8:
424 case RTE_CRYPTO_CIPHER_AES_XTS:
425 case RTE_CRYPTO_CIPHER_ARC4:
426 PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
431 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
437 if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
438 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
440 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
442 if (qat_alg_aead_session_create_content_desc_cipher(session,
443 cipher_xform->key.data,
444 cipher_xform->key.length)) {
452 if (session->bpi_ctx) {
453 bpi_cipher_ctx_free(session->bpi_ctx);
454 session->bpi_ctx = NULL;
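/* Illustrative sketch (not part of the original driver): the kind of cipher
 * xform an application would pass down to the function above for AES DOCSIS
 * BPI. The key pointer, key size, IV offset and IV length are hypothetical
 * placeholders; the IV itself is expected at iv.offset within the crypto op.
 */
static struct rte_crypto_sym_xform docsis_bpi_xform_sketch __rte_unused = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = NULL,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
		.key = { .data = NULL, .length = 16 },	/* AES-128 key */
		.iv = { .offset = 0, .length = 16 },	/* IV in op private data */
	},
};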
460 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
461 struct rte_crypto_sym_xform *xform,
462 struct rte_cryptodev_sym_session *sess,
463 struct rte_mempool *mempool)
465 void *sess_private_data;
468 if (rte_mempool_get(mempool, &sess_private_data)) {
470 "Couldn't get object from session mempool");
474 ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
476 PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
477 "session parameters");
479 /* Return session to mempool */
480 rte_mempool_put(mempool, sess_private_data);
484 set_session_private_data(sess, dev->driver_id,
491 qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
492 struct rte_crypto_sym_xform *xform, void *session_private)
494 struct qat_session *session = session_private;
498 PMD_INIT_FUNC_TRACE();
500 /* Set context descriptor physical address */
501 session->cd_paddr = rte_mempool_virt2iova(session) +
502 offsetof(struct qat_session, cd);
504 session->min_qat_dev_gen = QAT_GEN1;
506 /* Get requested QAT command id */
507 qat_cmd_id = qat_get_cmd_id(xform);
508 if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
509 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
512 session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
513 switch (session->qat_cmd) {
514 case ICP_QAT_FW_LA_CMD_CIPHER:
515 ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
519 case ICP_QAT_FW_LA_CMD_AUTH:
520 ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
524 case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
525 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
526 ret = qat_crypto_sym_configure_session_aead(xform,
531 ret = qat_crypto_sym_configure_session_cipher(dev,
535 ret = qat_crypto_sym_configure_session_auth(dev,
541 case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
542 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
543 ret = qat_crypto_sym_configure_session_aead(xform,
548 ret = qat_crypto_sym_configure_session_auth(dev,
552 ret = qat_crypto_sym_configure_session_cipher(dev,
558 case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
559 case ICP_QAT_FW_LA_CMD_TRNG_TEST:
560 case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
561 case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
562 case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
563 case ICP_QAT_FW_LA_CMD_MGF1:
564 case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
565 case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
566 case ICP_QAT_FW_LA_CMD_DELIMITER:
567 PMD_DRV_LOG(ERR, "Unsupported Service %u",
571 PMD_DRV_LOG(ERR, "Unsupported Service %u",
580 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
581 struct rte_crypto_sym_xform *xform,
582 struct qat_session *session)
584 struct rte_crypto_auth_xform *auth_xform = NULL;
585 struct qat_pmd_private *internals = dev->data->dev_private;
586 auth_xform = qat_get_auth_xform(xform);
587 uint8_t *key_data = auth_xform->key.data;
588 uint8_t key_length = auth_xform->key.length;
590 switch (auth_xform->algo) {
591 case RTE_CRYPTO_AUTH_SHA1_HMAC:
592 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
594 case RTE_CRYPTO_AUTH_SHA224_HMAC:
595 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
597 case RTE_CRYPTO_AUTH_SHA256_HMAC:
598 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
600 case RTE_CRYPTO_AUTH_SHA384_HMAC:
601 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
603 case RTE_CRYPTO_AUTH_SHA512_HMAC:
604 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
606 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
607 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
609 case RTE_CRYPTO_AUTH_AES_GMAC:
610 if (qat_alg_validate_aes_key(auth_xform->key.length,
611 &session->qat_cipher_alg) != 0) {
612 PMD_DRV_LOG(ERR, "Invalid AES key size");
615 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
616 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
619 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
620 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
622 case RTE_CRYPTO_AUTH_MD5_HMAC:
623 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
625 case RTE_CRYPTO_AUTH_NULL:
626 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
628 case RTE_CRYPTO_AUTH_KASUMI_F9:
629 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
631 case RTE_CRYPTO_AUTH_ZUC_EIA3:
632 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
633 PMD_DRV_LOG(ERR, "%s not supported on this device",
634 rte_crypto_auth_algorithm_strings
638 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
640 case RTE_CRYPTO_AUTH_SHA1:
641 case RTE_CRYPTO_AUTH_SHA256:
642 case RTE_CRYPTO_AUTH_SHA512:
643 case RTE_CRYPTO_AUTH_SHA224:
644 case RTE_CRYPTO_AUTH_SHA384:
645 case RTE_CRYPTO_AUTH_MD5:
646 case RTE_CRYPTO_AUTH_AES_CMAC:
647 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
648 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
652 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
657 session->auth_iv.offset = auth_xform->iv.offset;
658 session->auth_iv.length = auth_xform->iv.length;
660 if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
661 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
662 session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
663 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
665 * The cipher descriptor content must be created first,
666 * then the authentication descriptor
668 if (qat_alg_aead_session_create_content_desc_cipher(session,
669 auth_xform->key.data,
670 auth_xform->key.length))
673 if (qat_alg_aead_session_create_content_desc_auth(session,
677 auth_xform->digest_length,
681 session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
682 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
684 * The authentication descriptor content must be created first,
687 if (qat_alg_aead_session_create_content_desc_auth(session,
691 auth_xform->digest_length,
695 if (qat_alg_aead_session_create_content_desc_cipher(session,
696 auth_xform->key.data,
697 auth_xform->key.length))
700 /* Restore to authentication only */
701 session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
703 if (qat_alg_aead_session_create_content_desc_auth(session,
707 auth_xform->digest_length,
712 session->digest_length = auth_xform->digest_length;
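/* Illustrative sketch (not part of the original driver): an AES-GMAC
 * auth-only xform. As handled above, the GMAC key is validated as an AES
 * key and the session is internally built as a cipher+hash pair. Key size,
 * IV length and digest length are hypothetical placeholders.
 */
static struct rte_crypto_sym_xform gmac_auth_xform_sketch __rte_unused = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_AES_GMAC,
		.key = { .data = NULL, .length = 16 },	/* AES-128 key */
		.iv = { .offset = 0, .length = 12 },	/* 12B IV: fw derives J0 */
		.digest_length = 16,
	},
};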
717 qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
718 struct qat_session *session)
720 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
721 enum rte_crypto_auth_operation crypto_operation;
724 * Store AEAD IV parameters as cipher IV,
725 * to avoid unnecessary memory usage
727 session->cipher_iv.offset = xform->aead.iv.offset;
728 session->cipher_iv.length = xform->aead.iv.length;
730 switch (aead_xform->algo) {
731 case RTE_CRYPTO_AEAD_AES_GCM:
732 if (qat_alg_validate_aes_key(aead_xform->key.length,
733 &session->qat_cipher_alg) != 0) {
734 PMD_DRV_LOG(ERR, "Invalid AES key size");
737 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
738 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
740 case RTE_CRYPTO_AEAD_AES_CCM:
741 if (qat_alg_validate_aes_key(aead_xform->key.length,
742 &session->qat_cipher_alg) != 0) {
743 PMD_DRV_LOG(ERR, "Invalid AES key size");
746 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
747 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
750 PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
755 if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
756 aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
757 (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
758 aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
759 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
761 * The cipher descriptor content must be created first,
762 * then the authentication descriptor
765 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
766 RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
768 if (qat_alg_aead_session_create_content_desc_cipher(session,
769 aead_xform->key.data,
770 aead_xform->key.length))
773 if (qat_alg_aead_session_create_content_desc_auth(session,
774 aead_xform->key.data,
775 aead_xform->key.length,
776 aead_xform->aad_length,
777 aead_xform->digest_length,
781 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
783 * The authentication descriptor content must be created first,
787 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
788 RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
790 if (qat_alg_aead_session_create_content_desc_auth(session,
791 aead_xform->key.data,
792 aead_xform->key.length,
793 aead_xform->aad_length,
794 aead_xform->digest_length,
798 if (qat_alg_aead_session_create_content_desc_cipher(session,
799 aead_xform->key.data,
800 aead_xform->key.length))
804 session->digest_length = aead_xform->digest_length;
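/* Illustrative sketch (not part of the original driver): an AES-GCM AEAD
 * xform as consumed by the function above; its IV is stored as the session
 * cipher IV. Key, IV, AAD and digest sizes are hypothetical placeholders.
 */
static struct rte_crypto_sym_xform gcm_aead_xform_sketch __rte_unused = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = NULL, .length = 16 },	/* AES-128 key */
		.iv = { .offset = 0, .length = 12 },	/* 12B IV: fw derives J0 */
		.digest_length = 16,
		.aad_length = 16,
	},
};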
808 unsigned qat_crypto_sym_get_session_private_size(
809 struct rte_cryptodev *dev __rte_unused)
811 return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
814 static inline uint32_t
815 qat_bpicipher_preprocess(struct qat_session *ctx,
816 struct rte_crypto_op *op)
818 uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
819 struct rte_crypto_sym_op *sym_op = op->sym;
820 uint8_t last_block_len = block_len > 0 ?
821 sym_op->cipher.data.length % block_len : 0;
823 if (last_block_len &&
824 ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
826 /* Decrypt last block */
827 uint8_t *last_block, *dst, *iv;
828 uint32_t last_block_offset = sym_op->cipher.data.offset +
829 sym_op->cipher.data.length - last_block_len;
830 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
831 uint8_t *, last_block_offset);
833 if (unlikely(sym_op->m_dst != NULL))
834 /* out-of-place operation (OOP) */
835 dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
836 uint8_t *, last_block_offset);
840 if (last_block_len < sym_op->cipher.data.length)
841 /* use previous block ciphertext as IV */
842 iv = last_block - block_len;
844 /* runt block, i.e. less than one full block */
845 iv = rte_crypto_op_ctod_offset(op, uint8_t *,
846 ctx->cipher_iv.offset);
848 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
849 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
851 if (sym_op->m_dst != NULL)
852 rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
855 bpi_cipher_decrypt(last_block, dst, iv, block_len,
856 last_block_len, ctx->bpi_ctx);
857 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
858 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
860 if (sym_op->m_dst != NULL)
861 rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
866 return sym_op->cipher.data.length - last_block_len;
869 static inline uint32_t
870 qat_bpicipher_postprocess(struct qat_session *ctx,
871 struct rte_crypto_op *op)
873 uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
874 struct rte_crypto_sym_op *sym_op = op->sym;
875 uint8_t last_block_len = block_len > 0 ?
876 sym_op->cipher.data.length % block_len : 0;
878 if (last_block_len > 0 &&
879 ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
881 /* Encrypt last block */
882 uint8_t *last_block, *dst, *iv;
883 uint32_t last_block_offset;
885 last_block_offset = sym_op->cipher.data.offset +
886 sym_op->cipher.data.length - last_block_len;
887 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
888 uint8_t *, last_block_offset);
890 if (unlikely(sym_op->m_dst != NULL))
891 /* out-of-place operation (OOP) */
892 dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
893 uint8_t *, last_block_offset);
897 if (last_block_len < sym_op->cipher.data.length)
898 /* use previous block ciphertext as IV */
899 iv = dst - block_len;
901 /* runt block, i.e. less than one full block */
902 iv = rte_crypto_op_ctod_offset(op, uint8_t *,
903 ctx->cipher_iv.offset);
905 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
906 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
908 if (sym_op->m_dst != NULL)
909 rte_hexdump(stdout, "BPI: dst before post-process:",
910 dst, last_block_len);
912 bpi_cipher_encrypt(last_block, dst, iv, block_len,
913 last_block_len, ctx->bpi_ctx);
914 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
915 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
917 if (sym_op->m_dst != NULL)
918 rte_hexdump(stdout, "BPI: dst after post-process:", dst,
922 return sym_op->cipher.data.length - last_block_len;
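/* Illustrative sketch (not part of the original driver): the block
 * accounting shared by the two helpers above. For a hypothetical 100-byte
 * DOCSIS AES payload (16-byte blocks), 96 bytes of complete blocks go to
 * the hardware and the 4-byte runt block is handled by the CFB helpers.
 */
static uint32_t __rte_unused
bpi_block_split_sketch(uint32_t data_len, uint8_t block_len)
{
	uint8_t last_block_len = block_len > 0 ? data_len % block_len : 0;

	/* e.g. data_len = 100, block_len = 16 -> returns 96, runt = 4 */
	return data_len - last_block_len;
}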
926 txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
927 WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
928 q->hw_queue_number, q->tail);
929 q->nb_pending_requests = 0;
930 q->csr_tail = q->tail;
934 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
937 register struct qat_queue *queue;
938 struct qat_qp *tmp_qp = (struct qat_qp *)qp;
939 register uint32_t nb_ops_sent = 0;
940 register struct rte_crypto_op **cur_op = ops;
942 uint16_t nb_ops_possible = nb_ops;
943 register uint8_t *base_addr;
944 register uint32_t tail;
947 if (unlikely(nb_ops == 0))
950 /* read params used a lot in main loop into registers */
951 queue = &(tmp_qp->tx_q);
952 base_addr = (uint8_t *)queue->base_addr;
955 /* Find how many can actually fit on the ring */
956 tmp_qp->inflights16 += nb_ops;
957 overflow = tmp_qp->inflights16 - queue->max_inflights;
959 tmp_qp->inflights16 -= overflow;
960 nb_ops_possible = nb_ops - overflow;
961 if (nb_ops_possible == 0)
965 while (nb_ops_sent != nb_ops_possible) {
966 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
967 tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
969 tmp_qp->stats.enqueue_err_count++;
971 * This message cannot be enqueued;
972 * roll back the in-flight count by the number of ops not sent
974 tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
975 if (nb_ops_sent == 0)
980 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
986 tmp_qp->stats.enqueued_count += nb_ops_sent;
987 queue->nb_pending_requests += nb_ops_sent;
988 if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
989 queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
990 txq_write_tail(tmp_qp, queue);
996 void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
998 uint32_t old_head, new_head;
1001 old_head = q->csr_head;
1003 max_head = qp->nb_descriptors * q->msg_size;
1005 /* write out free descriptors */
1006 void *cur_desc = (uint8_t *)q->base_addr + old_head;
1008 if (new_head < old_head) {
1009 memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
1010 memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
1012 memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
1014 q->nb_processed_responses = 0;
1015 q->csr_head = new_head;
1017 /* write current head to CSR */
1018 WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
1019 q->hw_queue_number, new_head);
1023 qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
1026 struct qat_queue *rx_queue, *tx_queue;
1027 struct qat_qp *tmp_qp = (struct qat_qp *)qp;
1028 uint32_t msg_counter = 0;
1029 struct rte_crypto_op *rx_op;
1030 struct icp_qat_fw_comn_resp *resp_msg;
1033 rx_queue = &(tmp_qp->rx_q);
1034 tx_queue = &(tmp_qp->tx_q);
1035 head = rx_queue->head;
1036 resp_msg = (struct icp_qat_fw_comn_resp *)
1037 ((uint8_t *)rx_queue->base_addr + head);
1039 while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
1040 msg_counter != nb_ops) {
1041 rx_op = (struct rte_crypto_op *)(uintptr_t)
1042 (resp_msg->opaque_data);
1044 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
1045 rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
1046 sizeof(struct icp_qat_fw_comn_resp));
1048 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
1049 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
1050 resp_msg->comn_hdr.comn_status)) {
1051 rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1053 struct qat_session *sess = (struct qat_session *)
1054 get_session_private_data(
1055 rx_op->sym->session,
1056 cryptodev_qat_driver_id);
1059 qat_bpicipher_postprocess(sess, rx_op);
1060 rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1063 head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
1064 resp_msg = (struct icp_qat_fw_comn_resp *)
1065 ((uint8_t *)rx_queue->base_addr + head);
1070 if (msg_counter > 0) {
1071 rx_queue->head = head;
1072 tmp_qp->stats.dequeued_count += msg_counter;
1073 rx_queue->nb_processed_responses += msg_counter;
1074 tmp_qp->inflights16 -= msg_counter;
1076 if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
1077 rxq_free_desc(tmp_qp, rx_queue);
1079 /* also check if tail needs to be advanced */
1080 if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
1081 tx_queue->tail != tx_queue->csr_tail) {
1082 txq_write_tail(tmp_qp, tx_queue);
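/* Illustrative sketch (not part of the original driver): how an application
 * drives the enqueue/dequeue burst functions above through the cryptodev
 * API. Device id, queue pair id and the op array are caller-provided
 * placeholders.
 */
static void __rte_unused
burst_loop_sketch(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enqueued, dequeued = 0;

	/* Submit prepared ops; the PMD may defer the tail CSR write */
	enqueued = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

	/* Poll until every accepted op has been returned */
	while (dequeued < enqueued)
		dequeued += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				ops + dequeued, enqueued - dequeued);
}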
1088 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
1089 struct qat_alg_buf_list *list, uint32_t data_len)
1093 uint32_t buf_len = rte_pktmbuf_iova(buf) -
1094 buff_start + rte_pktmbuf_data_len(buf);
1096 list->bufers[0].addr = buff_start;
1097 list->bufers[0].resrvd = 0;
1098 list->bufers[0].len = buf_len;
1100 if (data_len <= buf_len) {
1101 list->num_bufs = nr;
1102 list->bufers[0].len = data_len;
1108 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
1109 PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
1111 QAT_SGL_MAX_NUMBER);
1115 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
1116 list->bufers[nr].resrvd = 0;
1117 list->bufers[nr].addr = rte_pktmbuf_iova(buf);
1119 buf_len += list->bufers[nr].len;
1122 if (buf_len > data_len) {
1123 list->bufers[nr].len -=
1129 list->num_bufs = nr;
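/* Illustrative note (not part of the original driver): for a hypothetical
 * two-segment mbuf chain carrying 3000 bytes (2048 + 952) with buff_start
 * at the start of the first segment, the list is filled as
 * bufers[0] = {buff_start, 2048} and bufers[1] = {segment 2 IOVA, 952},
 * with num_bufs = 2.
 */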
1135 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
1136 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1137 struct rte_crypto_op *op,
1138 struct icp_qat_fw_la_bulk_req *qat_req)
1140 /* copy IV into request if it fits */
1141 if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
1142 rte_memcpy(cipher_param->u.cipher_IV_array,
1143 rte_crypto_op_ctod_offset(op, uint8_t *,
1147 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
1148 qat_req->comn_hdr.serv_specif_flags,
1149 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
1150 cipher_param->u.s.cipher_IV_ptr =
1151 rte_crypto_op_ctophys_offset(op,
1156 /** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
1157 * where q is the padding of the nonce within the 16-byte block
1160 set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
1161 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1162 struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
1164 rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
1165 ICP_QAT_HW_CCM_NONCE_OFFSET,
1166 rte_crypto_op_ctod_offset(op, uint8_t *,
1167 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1169 *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
1170 q - ICP_QAT_HW_CCM_NONCE_OFFSET;
1172 if (aad_len_field_sz)
1173 rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
1174 rte_crypto_op_ctod_offset(op, uint8_t *,
1175 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1180 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
1181 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
1184 struct qat_session *ctx;
1185 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1186 struct icp_qat_fw_la_auth_req_params *auth_param;
1187 register struct icp_qat_fw_la_bulk_req *qat_req;
1188 uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
1189 uint32_t cipher_len = 0, cipher_ofs = 0;
1190 uint32_t auth_len = 0, auth_ofs = 0;
1191 uint32_t min_ofs = 0;
1192 uint64_t src_buf_start = 0, dst_buf_start = 0;
1195 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1196 if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
1197 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
1198 "operation requests, op (%p) is not a "
1199 "symmetric operation.", op);
1203 if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
1204 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
1205 " requests, op (%p) is sessionless.", op);
1209 ctx = (struct qat_session *)get_session_private_data(
1210 op->sym->session, cryptodev_qat_driver_id);
1212 if (unlikely(ctx == NULL)) {
1213 PMD_DRV_LOG(ERR, "Session was not created for this device");
1217 if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
1218 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
1219 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1225 qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
1226 rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
1227 qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
1228 cipher_param = (void *)&qat_req->serv_specif_rqpars;
1229 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
1231 if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
1232 ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1233 /* AES-GCM or AES-CCM */
1234 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1235 ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
1236 (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
1237 && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
1238 && ctx->qat_hash_alg ==
1239 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
1245 } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1248 } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1255 if (ctx->qat_cipher_alg ==
1256 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
1257 ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
1258 ctx->qat_cipher_alg ==
1259 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1262 (cipher_param->cipher_length % BYTE_LENGTH != 0)
1263 || (cipher_param->cipher_offset
1264 % BYTE_LENGTH != 0))) {
1266 "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
1267 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1270 cipher_len = op->sym->cipher.data.length >> 3;
1271 cipher_ofs = op->sym->cipher.data.offset >> 3;
1273 } else if (ctx->bpi_ctx) {
1274 /* DOCSIS - only send complete blocks to the device.
1275 * Process any partial block using CFB mode.
1276 * Even if there are 0 complete blocks, still send this to the device
1277 * so it reaches the rx queue for post-processing and dequeuing
1279 cipher_len = qat_bpicipher_preprocess(ctx, op);
1280 cipher_ofs = op->sym->cipher.data.offset;
1282 cipher_len = op->sym->cipher.data.length;
1283 cipher_ofs = op->sym->cipher.data.offset;
1286 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1287 cipher_param, op, qat_req);
1288 min_ofs = cipher_ofs;
1293 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
1294 ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
1295 ctx->qat_hash_alg ==
1296 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
1297 if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
1298 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
1300 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1301 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1304 auth_ofs = op->sym->auth.data.offset >> 3;
1305 auth_len = op->sym->auth.data.length >> 3;
1307 auth_param->u1.aad_adr =
1308 rte_crypto_op_ctophys_offset(op,
1309 ctx->auth_iv.offset);
1311 } else if (ctx->qat_hash_alg ==
1312 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1313 ctx->qat_hash_alg ==
1314 ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1316 set_cipher_iv(ctx->auth_iv.length,
1317 ctx->auth_iv.offset,
1318 cipher_param, op, qat_req);
1319 auth_ofs = op->sym->auth.data.offset;
1320 auth_len = op->sym->auth.data.length;
1322 auth_param->u1.aad_adr = 0;
1323 auth_param->u2.aad_sz = 0;
1326 * If len(IV) == 12 bytes, the firmware computes J0 internally
1328 if (ctx->auth_iv.length == 12) {
1329 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1330 qat_req->comn_hdr.serv_specif_flags,
1331 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1335 auth_ofs = op->sym->auth.data.offset;
1336 auth_len = op->sym->auth.data.length;
1341 auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
1347 * This address may be used to set the AAD physical pointer
1348 * to the IV offset within the op
1350 rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
1351 if (ctx->qat_hash_alg ==
1352 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1353 ctx->qat_hash_alg ==
1354 ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1356 * If len(IV) == 12 bytes, the firmware computes J0 internally
1358 if (ctx->cipher_iv.length == 12) {
1359 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1360 qat_req->comn_hdr.serv_specif_flags,
1361 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1364 set_cipher_iv(ctx->cipher_iv.length,
1365 ctx->cipher_iv.offset,
1366 cipher_param, op, qat_req);
1368 } else if (ctx->qat_hash_alg ==
1369 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
1371 /* In case of AES-CCM this may point to user-selected memory
1372 * or the IV offset in the crypto_op
1374 uint8_t *aad_data = op->sym->aead.aad.data;
1375 /* This is the true AAD length; it does not include the 18 bytes of
1378 uint8_t aad_ccm_real_len = 0;
1380 uint8_t aad_len_field_sz = 0;
1381 uint32_t msg_len_be =
1382 rte_bswap32(op->sym->aead.data.length);
1384 if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
1385 aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
1386 aad_ccm_real_len = ctx->aad_len -
1387 ICP_QAT_HW_CCM_AAD_B0_LEN -
1388 ICP_QAT_HW_CCM_AAD_LEN_INFO;
1391 * aad_len is not greater than 18, so there is no actual AAD data;
1392 * use the IV after the op for the B0 block
1394 aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
1395 ctx->cipher_iv.offset);
1396 aad_phys_addr_aead =
1397 rte_crypto_op_ctophys_offset(op,
1398 ctx->cipher_iv.offset);
1401 uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
1403 aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
1404 ctx->digest_length, q);
1406 if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
1407 memcpy(aad_data + ctx->cipher_iv.length +
1408 ICP_QAT_HW_CCM_NONCE_OFFSET
1409 + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
1410 (uint8_t *)&msg_len_be,
1411 ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
1413 memcpy(aad_data + ctx->cipher_iv.length +
1414 ICP_QAT_HW_CCM_NONCE_OFFSET,
1415 (uint8_t *)&msg_len_be
1416 + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
1420 if (aad_len_field_sz > 0) {
1421 *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
1422 = rte_bswap16(aad_ccm_real_len);
1424 if ((aad_ccm_real_len + aad_len_field_sz)
1425 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
1426 uint8_t pad_len = 0;
1427 uint8_t pad_idx = 0;
1429 pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
1430 ((aad_ccm_real_len + aad_len_field_sz) %
1431 ICP_QAT_HW_CCM_AAD_B0_LEN);
1432 pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
1433 aad_ccm_real_len + aad_len_field_sz;
1434 memset(&aad_data[pad_idx],
1440 set_cipher_iv_ccm(ctx->cipher_iv.length,
1441 ctx->cipher_iv.offset,
1442 cipher_param, op, q,
1447 cipher_len = op->sym->aead.data.length;
1448 cipher_ofs = op->sym->aead.data.offset;
1449 auth_len = op->sym->aead.data.length;
1450 auth_ofs = op->sym->aead.data.offset;
1452 auth_param->u1.aad_adr = aad_phys_addr_aead;
1453 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
1454 min_ofs = op->sym->aead.data.offset;
1457 if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1460 /* adjust for chain case */
1461 if (do_cipher && do_auth)
1462 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1464 if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1467 if (unlikely(op->sym->m_dst != NULL)) {
1468 /* Out-of-place operation (OOP)
1469 * Don't align DMA start. DMA the minimum data-set
1470 * so as not to overwrite data in dest buffer
1473 rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
1475 rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
1478 /* In-place operation
1479 * Start DMA at nearest aligned address below min_ofs
1482 rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
1483 & QAT_64_BTYE_ALIGN_MASK;
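/* e.g. assuming QAT_64_BTYE_ALIGN_MASK clears the low six bits,
 * an IOVA of 0x1234 becomes 0x1200, so the DMA start is 64-byte
 * aligned; the cipher/auth offsets further down are expressed
 * relative to this start.
 */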
1485 if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
1486 rte_pktmbuf_headroom(op->sym->m_src))
1488 /* alignment has pushed addr ahead of start of mbuf
1489 * so revert and take the performance hit
1492 rte_pktmbuf_iova_offset(op->sym->m_src,
1495 dst_buf_start = src_buf_start;
1498 if (do_cipher || do_aead) {
1499 cipher_param->cipher_offset =
1500 (uint32_t)rte_pktmbuf_iova_offset(
1501 op->sym->m_src, cipher_ofs) - src_buf_start;
1502 cipher_param->cipher_length = cipher_len;
1504 cipher_param->cipher_offset = 0;
1505 cipher_param->cipher_length = 0;
1508 if (do_auth || do_aead) {
1509 auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
1510 op->sym->m_src, auth_ofs) - src_buf_start;
1511 auth_param->auth_len = auth_len;
1513 auth_param->auth_off = 0;
1514 auth_param->auth_len = 0;
1517 qat_req->comn_mid.dst_length =
1518 qat_req->comn_mid.src_length =
1519 (cipher_param->cipher_offset + cipher_param->cipher_length)
1520 > (auth_param->auth_off + auth_param->auth_len) ?
1521 (cipher_param->cipher_offset + cipher_param->cipher_length)
1522 : (auth_param->auth_off + auth_param->auth_len);
1526 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1527 QAT_COMN_PTR_TYPE_SGL);
1528 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1529 &qat_op_cookie->qat_sgl_list_src,
1530 qat_req->comn_mid.src_length);
1532 PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1536 if (likely(op->sym->m_dst == NULL))
1537 qat_req->comn_mid.dest_data_addr =
1538 qat_req->comn_mid.src_data_addr =
1539 qat_op_cookie->qat_sgl_src_phys_addr;
1541 ret = qat_sgl_fill_array(op->sym->m_dst,
1543 &qat_op_cookie->qat_sgl_list_dst,
1544 qat_req->comn_mid.dst_length);
1547 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
1552 qat_req->comn_mid.src_data_addr =
1553 qat_op_cookie->qat_sgl_src_phys_addr;
1554 qat_req->comn_mid.dest_data_addr =
1555 qat_op_cookie->qat_sgl_dst_phys_addr;
1558 qat_req->comn_mid.src_data_addr = src_buf_start;
1559 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1562 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1563 rte_hexdump(stdout, "qat_req:", qat_req,
1564 sizeof(struct icp_qat_fw_la_bulk_req));
1565 rte_hexdump(stdout, "src_data:",
1566 rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1567 rte_pktmbuf_data_len(op->sym->m_src));
1569 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
1571 ctx->cipher_iv.offset);
1572 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
1573 ctx->cipher_iv.length);
1577 if (ctx->auth_iv.length) {
1578 uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
1580 ctx->auth_iv.offset);
1581 rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
1582 ctx->auth_iv.length);
1584 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1585 ctx->digest_length);
1589 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
1590 ctx->digest_length);
1591 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
1598 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1600 uint32_t div = data >> shift;
1601 uint32_t mult = div << shift;
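/* Computes data modulo 2^shift without a division: e.g. data = 1060,
 * shift = 10 gives div = 1, mult = 1024 and data - mult = 36, the
 * wrapped ring offset.
 */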
1606 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1607 __rte_unused struct rte_cryptodev_config *config)
1609 PMD_INIT_FUNC_TRACE();
1613 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1615 PMD_INIT_FUNC_TRACE();
1619 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1621 PMD_INIT_FUNC_TRACE();
1624 int qat_dev_close(struct rte_cryptodev *dev)
1628 PMD_INIT_FUNC_TRACE();
1630 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1631 ret = qat_crypto_sym_qp_release(dev, i);
1639 void qat_dev_info_get(struct rte_cryptodev *dev,
1640 struct rte_cryptodev_info *info)
1642 struct qat_pmd_private *internals = dev->data->dev_private;
1644 PMD_INIT_FUNC_TRACE();
1646 info->max_nb_queue_pairs =
1647 ADF_NUM_SYM_QPS_PER_BUNDLE *
1648 ADF_NUM_BUNDLES_PER_DEV;
1649 info->feature_flags = dev->feature_flags;
1650 info->capabilities = internals->qat_dev_capabilities;
1651 info->sym.max_nb_sessions = internals->max_nb_sessions;
1652 info->driver_id = cryptodev_qat_driver_id;
1653 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1657 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1658 struct rte_cryptodev_stats *stats)
1661 struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1663 PMD_INIT_FUNC_TRACE();
1664 if (stats == NULL) {
1665 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1668 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1669 if (qp[i] == NULL) {
1670 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1674 stats->enqueued_count += qp[i]->stats.enqueued_count;
1675 stats->dequeued_count += qp[i]->stats.dequeued_count;
1676 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1677 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1681 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1684 struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1686 PMD_INIT_FUNC_TRACE();
1687 for (i = 0; i < dev->data->nb_queue_pairs; i++)
1688 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
1689 PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");