/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_byteorder.h>

#include <openssl/evp.h>

#include "qat_logs.h"
#include "qat_algs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"
static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
		struct qat_pmd_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		if (capability->sym.cipher.algo == algo)
			return 1;
	}
	return 0;
}
static int
qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
		struct qat_pmd_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
			continue;

		if (capability->sym.auth.algo == algo)
			return 1;
	}
	return 0;
}
/** Encrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB encryption, same result, more performant
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;

	/* ECB method: encrypt the IV, then XOR this with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
			<= 0)
		goto cipher_encrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst + i) = *(src + i) ^ encrypted_iv[i];

	return 0;

cipher_encrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
	return -EINVAL;
}
/** Decrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB decryption, same result, more performant
 */
static inline int
bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[16];
	int i;

	/* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
			<= 0)
		goto cipher_decrypt_err;

	for (i = 0; i < srclen; i++)
		*(dst + i) = *(src + i) ^ encrypted_iv[i];

	return 0;

cipher_decrypt_err:
	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
	return -EINVAL;
}
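
/*
 * Note on the ECB+XOR trick used above (a sketch of the maths, not part of
 * the original code): for a single block, CFB mode is defined as
 *	C = P XOR E_K(IV)   (encrypt)
 *	P = C XOR E_K(IV)   (decrypt)
 * so both directions only ever ECB-encrypt the IV and XOR the result with
 * the data. That is why bpi_cipher_decrypt() above also calls
 * EVP_EncryptUpdate(): one stateless ECB pass replaces a stateful CFB
 * context, which is the "same result, more performant" claim in the
 * function comments.
 */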
/** Creates a context in either AES or DES in ECB mode
 *  Depends on openssl libcrypto
 */
static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
		enum rte_crypto_cipher_operation direction __rte_unused,
		uint8_t *key, void **ctx)
{
	const EVP_CIPHER *algo = NULL;
	int ret;

	*ctx = EVP_CIPHER_CTX_new();
	if (*ctx == NULL) {
		ret = -ENOMEM;
		goto ctx_init_err;
	}

	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
		algo = EVP_des_ecb();
	else
		algo = EVP_aes_128_ecb();

	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, NULL) != 1) {
		ret = -EINVAL;
		goto ctx_init_err;
	}

	return 0;

ctx_init_err:
	if (*ctx != NULL)
		EVP_CIPHER_CTX_free(*ctx);
	return ret;
}
/** Frees a context previously created
 *  Depends on openssl libcrypto
 */
static void
bpi_cipher_ctx_free(void *bpi_ctx)
{
	if (bpi_ctx != NULL)
		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
}
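
/*
 * Illustrative usage of the BPI helpers above (hypothetical values, not
 * driver code): create one context per session, run a per-packet partial
 * block through it, then free it.
 *
 *	void *bpi_ctx = NULL;
 *	uint8_t key[16] = { 0 };	// example key (assumption)
 *	uint8_t iv[16] = { 0 };		// example IV (assumption)
 *	uint8_t in[10], out[10];	// runt block, shorter than 16 bytes
 *
 *	if (bpi_cipher_ctx_init(RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
 *			RTE_CRYPTO_CIPHER_OP_ENCRYPT, key, &bpi_ctx) == 0) {
 *		bpi_cipher_encrypt(in, out, iv, 16, 10, bpi_ctx);
 *		bpi_cipher_ctx_free(bpi_ctx);
 *	}
 */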
static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
		struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	struct qat_session *s = (struct qat_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	/* AEAD */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		/* AES-GCM and AES-CCM work in opposite orders: GCM first
		 * encrypts and then generates the hash, whereas CCM first
		 * generates the hash and then encrypts. The same relation
		 * applies to decryption.
		 */
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			else
				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
		} else {
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			else
				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
		}
	}

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}
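
/*
 * Summary of the xform-to-command mapping above (derived from the code,
 * for reference only):
 *
 *	cipher only               -> ICP_QAT_FW_LA_CMD_CIPHER
 *	auth only                 -> ICP_QAT_FW_LA_CMD_AUTH
 *	GCM encrypt / CCM decrypt -> ICP_QAT_FW_LA_CMD_CIPHER_HASH
 *	GCM decrypt / CCM encrypt -> ICP_QAT_FW_LA_CMD_HASH_CIPHER
 *	cipher-then-auth chain    -> ICP_QAT_FW_LA_CMD_CIPHER_HASH
 *	auth-then-cipher chain    -> ICP_QAT_FW_LA_CMD_HASH_CIPHER
 */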
static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}
int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_session *session)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					&session->bpi_ctx);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					&session->bpi_ctx);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_alg_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}
int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}
int
qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_session *session = session_private;
	int qat_cmd_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Set context descriptor physical address */
	session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
			offsetof(struct qat_session, cd);

	session->min_qat_dev_gen = QAT_GEN1;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_crypto_sym_configure_session_cipher(dev, xform,
				session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_crypto_sym_configure_session_auth(dev, xform,
				session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_crypto_sym_configure_session_aead(xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_crypto_sym_configure_session_aead(xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_crypto_sym_configure_session_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_crypto_sym_configure_session_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
		}
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	}

	return 0;
}
int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct qat_pmd_private *internals = dev->data->dev_private;
	auth_xform = qat_get_auth_xform(xform);
	uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_alg_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			PMD_DRV_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * It needs to create cipher desc content first,
			 * then authentication
			 */
			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_alg_aead_session_create_content_desc_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * It needs to create authentication desc content first,
			 * then cipher
			 */
			if (qat_alg_aead_session_create_content_desc_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;

			if (qat_alg_aead_session_create_content_desc_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}
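
/*
 * Note (explanatory): AES-GMAC has no dedicated QAT descriptor, so the
 * code above builds it from the GCM (Galois-128) primitives, creating both
 * a cipher and an auth content descriptor, and then restores qat_cmd to
 * plain ICP_QAT_FW_LA_CMD_AUTH so the request path still treats the
 * session as hash-only.
 */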
int
qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
				struct qat_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_alg_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_alg_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		break;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * It needs to create cipher desc content first,
		 * then authentication
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * It needs to create authentication desc content first,
		 * then cipher
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_alg_aead_session_create_content_desc_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_alg_aead_session_create_content_desc_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	session->digest_length = aead_xform->digest_length;
	return 0;
}
unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}
static inline uint32_t
qat_bpicipher_preprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {

		/* Decrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
					uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = last_block - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
				last_block_len);
#endif
		bpi_cipher_decrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
		rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
				last_block_len);
#endif
	}

	return sym_op->cipher.data.length - last_block_len;
}
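
/*
 * Example of the pre-process arithmetic above (illustrative numbers): with
 * DES DOCSIS BPI (8-byte blocks) and cipher.data.length == 20, the last
 * 20 % 8 == 4 bytes are decrypted here in software via the CFB helpers,
 * and the returned length 20 - 4 == 16 covers only the complete blocks
 * that are handed to the QAT device.
 */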
static inline uint32_t
qat_bpicipher_postprocess(struct qat_session *ctx,
				struct rte_crypto_op *op)
{
	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
					uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src before post-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst before post-process:",
				dst, last_block_len);
#endif
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "BPI: src after post-process:", last_block,
			last_block_len);
		if (sym_op->m_dst != NULL)
			rte_hexdump(stdout, "BPI: dst after post-process:", dst,
				last_block_len);
#endif
	}

	return sym_op->cipher.data.length - last_block_len;
}
static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
{
	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, q->tail);
	q->nb_pending_requests = 0;
	q->csr_tail = q->tail;
}
uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	tmp_qp->inflights16 += nb_ops;
	overflow = tmp_qp->inflights16 - queue->max_inflights;
	if (overflow > 0) {
		tmp_qp->inflights16 -= overflow;
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
			tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/*
			 * This message cannot be enqueued; roll back the
			 * in-flight count for the ops that were not sent
			 */
			tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	queue->nb_pending_requests += nb_ops_sent;
	if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
			queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
		txq_write_tail(tmp_qp, queue);
	}
	return nb_ops_sent;
}
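
/*
 * Note on the ring accounting above (explanatory, based on the code):
 * inflights16 is bumped optimistically by nb_ops and then reduced by any
 * overflow past max_inflights, so nb_ops_possible is what actually fits on
 * the ring. The tail CSR is only written when enough requests are pending
 * (QAT_CSR_TAIL_WRITE_THRESH) or when few ops are in flight
 * (QAT_CSR_TAIL_FORCE_WRITE_THRESH), which batches the expensive MMIO
 * doorbell writes across bursts.
 */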
static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	/* write current head to CSR */
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, new_head);
}
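
/*
 * The two-memset split above handles ring wrap-around: when the new head
 * is numerically below the old one, the freed region spans the end of the
 * ring, so [old_head, max_head) and [0, new_head) are cleared separately
 * with the empty-descriptor signature before the head CSR is advanced.
 */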
uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *rx_queue, *tx_queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;
	uint32_t head;

	rx_queue = &(tmp_qp->rx_q);
	tx_queue = &(tmp_qp->tx_q);
	head = rx_queue->head;
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)rx_queue->base_addr + head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comn_resp));
#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			struct qat_session *sess = (struct qat_session *)
					get_session_private_data(
					rx_op->sym->session,
					cryptodev_qat_driver_id);

			if (sess->bpi_ctx)
				qat_bpicipher_postprocess(sess, rx_op);
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
		resp_msg = (struct icp_qat_fw_comn_resp *)
				((uint8_t *)rx_queue->base_addr + head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		rx_queue->head = head;
		tmp_qp->stats.dequeued_count += msg_counter;
		rx_queue->nb_processed_responses += msg_counter;
		tmp_qp->inflights16 -= msg_counter;

		if (rx_queue->nb_processed_responses >
				QAT_CSR_HEAD_WRITE_THRESH)
			rxq_free_desc(tmp_qp, rx_queue);
	}
	/* also check if tail needs to be advanced */
	if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
			tx_queue->tail != tx_queue->csr_tail) {
		txq_write_tail(tmp_qp, tx_queue);
	}
	return msg_counter;
}
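
/*
 * Note (explanatory): the opaque_data field written at enqueue time in
 * qat_write_hw_desc_entry() (qat_req->comn_mid.opaque_data = op) is what
 * lets the dequeue loop above recover the original rte_crypto_op pointer
 * from each firmware response without any lookup table.
 */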
static inline int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
		struct qat_alg_buf_list *list, uint32_t data_len)
{
	int nr = 1;

	uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
			buff_start + rte_pktmbuf_data_len(buf);

	list->bufers[0].addr = buff_start;
	list->bufers[0].resrvd = 0;
	list->bufers[0].len = buf_len;

	if (data_len <= buf_len) {
		list->num_bufs = nr;
		list->bufers[0].len = data_len;
		return 0;
	}

	buf = buf->next;
	while (buf) {
		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
			PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
					" entry(%u)",
					QAT_SGL_MAX_NUMBER);
			return -EINVAL;
		}

		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
		list->bufers[nr].resrvd = 0;
		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);

		buf_len += list->bufers[nr].len;
		buf = buf->next;

		if (buf_len > data_len) {
			list->bufers[nr].len -=
				buf_len - data_len;
			buf = NULL;
		}
		++nr;
	}
	list->num_bufs = nr;

	return 0;
}
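
/*
 * Illustrative walk-through of the SGL fill above (hypothetical sizes):
 * for a 3-segment mbuf chain of 64+64+64 bytes, buff_start at the start of
 * segment 0 and data_len == 150, entry 0 covers the 64-byte remainder of
 * segment 0, entry 1 covers segment 1 (running total 128), and entry 2 is
 * trimmed by buf_len - data_len == 42 down to 22 bytes, so the three
 * entries sum to exactly data_len.
 */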
static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
		struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_op *op,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				rte_crypto_op_ctod_offset(op, uint8_t *,
					iv_offset),
				iv_length);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr =
				rte_crypto_op_ctophys_offset(op,
					iv_offset);
	}
}
/** Setting the IV for CCM is a special case: byte 0 is set to q-1,
 *  where q is the padding of the nonce in the 16-byte block
 */
static inline void
set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
		struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
{
	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
			ICP_QAT_HW_CCM_NONCE_OFFSET,
			rte_crypto_op_ctod_offset(op, uint8_t *,
				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
			iv_length);
	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

	if (aad_len_field_sz)
		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
			rte_crypto_op_ctod_offset(op, uint8_t *,
				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
			iv_length);
}
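
/*
 * Worked example for the CCM fields used above (values illustrative,
 * assuming ICP_QAT_HW_CCM_NQ_CONST == 15 per the CCM n+q = 15 rule in
 * NIST SP 800-38C): with a 12-byte nonce, q = 15 - 12 = 3, so the message
 * length field occupies the last 3 bytes of the 16-byte counter block and
 * byte 0 of the IV is set to q - 1 = 2.
 */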
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
		struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
{
	int ret = 0;
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;
	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	uint32_t min_ofs = 0;
	uint64_t src_buf_start = 0, dst_buf_start = 0;
	uint8_t do_sgl = 0;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	ctx = (struct qat_session *)get_session_private_data(
			op->sym->session, cryptodev_qat_driver_id);

	if (unlikely(ctx == NULL)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
		PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			do_aead = 1;
		} else {
			do_auth = 1;
			do_cipher = 1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		do_auth = 1;
		do_cipher = 0;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		do_auth = 0;
		do_cipher = 1;
	}

	if (do_cipher) {

		if (ctx->qat_cipher_alg ==
					ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {

			if (unlikely(
				(cipher_param->cipher_length % BYTE_LENGTH != 0)
				|| (cipher_param->cipher_offset
						% BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			cipher_len = op->sym->cipher.data.length >> 3;
			cipher_ofs = op->sym->cipher.data.offset >> 3;

		} else if (ctx->bpi_ctx) {
			/* DOCSIS - only send complete blocks to device.
			 * Process any partial block using CFB mode.
			 * Even if 0 complete blocks, still send this to device
			 * to get into rx queue for post-process and dequeuing
			 */
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}

		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
				cipher_param, op, qat_req);
		min_ofs = cipher_ofs;
	}
	if (do_auth) {

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
			ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
			if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
				|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			auth_ofs = op->sym->auth.data.offset >> 3;
			auth_len = op->sym->auth.data.length >> 3;

			auth_param->u1.aad_adr =
					rte_crypto_op_ctophys_offset(op,
							ctx->auth_iv.offset);

		} else if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {

			set_cipher_iv(ctx->auth_iv.length,
				ctx->auth_iv.offset,
				cipher_param, op, qat_req);
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;

			auth_param->u1.aad_adr = 0;
			auth_param->u2.aad_sz = 0;

			/*
			 * If len(iv)==12B fw computes J0
			 */
			if (ctx->auth_iv.length == 12) {
				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
					qat_req->comn_hdr.serv_specif_flags,
					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
			}
		} else {
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
		}
		min_ofs = auth_ofs;

		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
	}
	if (do_aead) {
		/*
		 * This address may be used for setting the AAD physical
		 * pointer into the IV offset from the op
		 */
		phys_addr_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
		if (ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			/*
			 * If len(iv)==12B fw computes J0
			 */
			if (ctx->cipher_iv.length == 12) {
				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
					qat_req->comn_hdr.serv_specif_flags,
					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
			}

			set_cipher_iv(ctx->cipher_iv.length,
					ctx->cipher_iv.offset,
					cipher_param, op, qat_req);

		} else if (ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {

			/* In the case of AES-CCM this may point to
			 * user-selected memory or to the IV offset in the
			 * crypto_op
			 */
			uint8_t *aad_data = op->sym->aead.aad.data;
			/* This is the true AAD length; it does not include
			 * the 18 bytes of preceding data
			 */
			uint8_t aad_ccm_real_len = 0;

			uint8_t aad_len_field_sz = 0;
			uint32_t msg_len_be =
					rte_bswap32(op->sym->aead.data.length);

			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
				aad_ccm_real_len = ctx->aad_len -
					ICP_QAT_HW_CCM_AAD_B0_LEN -
					ICP_QAT_HW_CCM_AAD_LEN_INFO;
			} else {
				/*
				 * aad_len is not greater than 18, so there is
				 * no actual AAD data; use the IV following
				 * the op for the B0 block
				 */
				aad_data = rte_crypto_op_ctod_offset(op,
						uint8_t *,
						ctx->cipher_iv.offset);
				aad_phys_addr_aead =
						rte_crypto_op_ctophys_offset(op,
							ctx->cipher_iv.offset);
			}

			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
					ctx->cipher_iv.length;

			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
					aad_len_field_sz,
					ctx->digest_length, q);

			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
				memcpy(aad_data + ctx->cipher_iv.length +
					ICP_QAT_HW_CCM_NONCE_OFFSET
					+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
					(uint8_t *)&msg_len_be,
					ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
			} else {
				memcpy(aad_data + ctx->cipher_iv.length +
					ICP_QAT_HW_CCM_NONCE_OFFSET,
					(uint8_t *)&msg_len_be
					+ (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
					- q), q);
			}

			if (aad_len_field_sz > 0) {
				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
						= rte_bswap16(aad_ccm_real_len);

				if ((aad_ccm_real_len + aad_len_field_sz)
						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
					uint8_t pad_len = 0;
					uint8_t pad_idx = 0;

					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
						((aad_ccm_real_len + aad_len_field_sz) %
						ICP_QAT_HW_CCM_AAD_B0_LEN);
					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
						aad_ccm_real_len + aad_len_field_sz;
					memset(&aad_data[pad_idx],
							0, pad_len);
				}
			}

			set_cipher_iv_ccm(ctx->cipher_iv.length,
						ctx->cipher_iv.offset,
						cipher_param, op, q,
						aad_len_field_sz);
		}

		cipher_len = op->sym->aead.data.length;
		cipher_ofs = op->sym->aead.data.offset;
		auth_len = op->sym->aead.data.length;
		auth_ofs = op->sym->aead.data.offset;

		auth_param->u1.aad_adr = aad_phys_addr_aead;
		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
		min_ofs = op->sym->aead.data.offset;
	}
	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
		do_sgl = 1;

	/* adjust for chain case */
	if (do_cipher && do_auth)
		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;

	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
		min_ofs = 0;

	if (unlikely(op->sym->m_dst != NULL)) {
		/* Out-of-place operation (OOP)
		 * Don't align DMA start. DMA the minimum data-set
		 * so as not to overwrite data in dest buffer
		 */
		src_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
		dst_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);

	} else {
		/* In-place operation
		 * Start DMA at nearest aligned address below min_ofs
		 */
		src_buf_start =
			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
						& QAT_64_BTYE_ALIGN_MASK;

		if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
					rte_pktmbuf_headroom(op->sym->m_src))
							> src_buf_start)) {
			/* alignment has pushed addr ahead of start of mbuf
			 * so revert and take the performance hit
			 */
			src_buf_start =
				rte_pktmbuf_mtophys_offset(op->sym->m_src,
								min_ofs);
		}
		dst_buf_start = src_buf_start;
	}
	if (do_cipher || do_aead) {
		cipher_param->cipher_offset =
				(uint32_t)rte_pktmbuf_mtophys_offset(
				op->sym->m_src, cipher_ofs) - src_buf_start;
		cipher_param->cipher_length = cipher_len;
	} else {
		cipher_param->cipher_offset = 0;
		cipher_param->cipher_length = 0;
	}

	if (do_auth || do_aead) {
		auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
				op->sym->m_src, auth_ofs) - src_buf_start;
		auth_param->auth_len = auth_len;
	} else {
		auth_param->auth_off = 0;
		auth_param->auth_len = 0;
	}

	qat_req->comn_mid.dst_length =
		qat_req->comn_mid.src_length =
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		> (auth_param->auth_off + auth_param->auth_len) ?
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		: (auth_param->auth_off + auth_param->auth_len);
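
	/*
	 * The ternary above sets src_length/dst_length to the larger of the
	 * cipher and auth extents, e.g. cipher_offset 0 + cipher_length 64
	 * versus auth_off 0 + auth_len 80 gives a DMA length of 80 (values
	 * illustrative), so the device sees all data either service touches.
	 */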
	if (do_sgl) {

		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);
		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
				&qat_op_cookie->qat_sgl_list_src,
				qat_req->comn_mid.src_length);
		if (ret) {
			PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
			return ret;
		}

		if (likely(op->sym->m_dst == NULL))
			qat_req->comn_mid.dest_data_addr =
				qat_req->comn_mid.src_data_addr =
				qat_op_cookie->qat_sgl_src_phys_addr;
		else {
			ret = qat_sgl_fill_array(op->sym->m_dst,
					dst_buf_start,
					&qat_op_cookie->qat_sgl_list_dst,
					qat_req->comn_mid.dst_length);

			if (ret) {
				PMD_DRV_LOG(ERR, "QAT PMD Cannot "
						"fill sgl array");
				return ret;
			}

			qat_req->comn_mid.src_data_addr =
				qat_op_cookie->qat_sgl_src_phys_addr;
			qat_req->comn_mid.dest_data_addr =
					qat_op_cookie->qat_sgl_dst_phys_addr;
		}
	} else {
		qat_req->comn_mid.src_data_addr = src_buf_start;
		qat_req->comn_mid.dest_data_addr = dst_buf_start;
	}
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	rte_hexdump(stdout, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	rte_hexdump(stdout, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
			rte_pktmbuf_data_len(op->sym->m_src));
	if (do_cipher) {
		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
						uint8_t *,
						ctx->cipher_iv.offset);
		rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
				ctx->cipher_iv.length);
	}

	if (do_auth) {
		if (ctx->auth_iv.length) {
			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
							uint8_t *,
							ctx->auth_iv.offset);
			rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
						ctx->auth_iv.length);
		}
		rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
				ctx->digest_length);
	}

	if (do_aead) {
		rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
				ctx->digest_length);
		rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
				ctx->aad_len);
	}
#endif
	return 0;
}
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t div = data >> shift;
	uint32_t mult = div << shift;

	return data - mult;
}
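
/*
 * Example (illustrative): ring sizes are powers of two, so with
 * shift == 12 (a 4096-byte ring) adf_modulo(4104, 12) returns
 * 4104 - 4096 == 8, i.e. the ring offset wraps without a division.
 */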
int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
}
int qat_dev_close(struct rte_cryptodev *dev)
{
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_crypto_sym_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
void qat_dev_info_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs =
				ADF_NUM_SYM_QPS_PER_BUNDLE *
				ADF_NUM_BUNDLES_PER_DEV;
		info->feature_flags = dev->feature_flags;
		info->capabilities = internals->qat_dev_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_qat_driver_id;
		info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	}
}
void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->stats.enqueued_count;
		stats->dequeued_count += qp[i]->stats.dequeued_count;
		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
	}
}
void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_queue_pairs; i++)
		memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
}