diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index f8db12f946..84c26a8062 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,1204 +1,176 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
  */
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#include "qat_logs.h"
-#include "qat_algs.h"
+#include "qat_device.h"
+#include "qat_qp.h"
 #include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
-
-#define BYTE_LENGTH	8
-
-static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
-	{	/* SHA1 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 20,
-					.max = 20,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA224 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 28,
-					.max = 28,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA256 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 32,
-					.max = 32,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA384 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 128,
-					.max = 128,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 48,
-					.max = 48,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA512 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
-				.block_size = 128,
-				.key_size = {
-					.min = 128,
-					.max = 128,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* MD5 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 8,
-					.max = 64,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* AES XCBC MAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* AES GCM (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 8,
-					.max = 16,
-					.increment = 4
-				},
-				.aad_size = {
-					.min = 8,
-					.max = 12,
-					.increment = 4
-				}
-			}, }
-		}, }
-	},
-	{	/* AES GMAC (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 8,
-					.max = 16,
-					.increment = 4
-				},
-				.aad_size = {
-					.min = 1,
-					.max = 65535,
-					.increment = 1
-				}
-			}, }
-		}, }
-	},
-	{	/* SNOW 3G (UIA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.aad_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES GCM (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES CBC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* SNOW 3G (UEA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES CTR */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* NULL (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, },
-		}, },
-	},
-	{	/* NULL (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				}
-			}, },
-		}, }
-	},
-	{	/* KASUMI (F8) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* KASUMI (F9) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.aad_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* 3DES CBC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 24,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* 3DES CTR */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_3DES_CTR,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 24,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
+#include "qat_sym.h"
+#include "qat_asym.h"
 
-static inline uint32_t
-adf_modulo(uint32_t data, uint32_t shift);
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
-
-void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
-		void *session)
+int
+qat_cryptodev_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
 {
-	struct qat_session *sess = session;
-	phys_addr_t cd_paddr;
-
-	PMD_INIT_FUNC_TRACE();
-	if (session) {
-		cd_paddr = sess->cd_paddr;
-		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
-		sess->cd_paddr = cd_paddr;
-	} else
-		PMD_DRV_LOG(ERR, "NULL session");
+	return 0;
 }
 
-static int
-qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+int
+qat_cryptodev_start(__rte_unused struct rte_cryptodev *dev)
 {
-	/* Cipher Only */
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
-		return ICP_QAT_FW_LA_CMD_CIPHER;
-
-	/* Authentication Only */
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
-		return ICP_QAT_FW_LA_CMD_AUTH;
-
-	if (xform->next == NULL)
-		return -1;
-
-	/* Cipher then Authenticate */
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
-
-	/* Authenticate then Cipher */
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
-		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
-
-	return -1;
+	return 0;
 }
 
-static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+void
+qat_cryptodev_stop(__rte_unused struct rte_cryptodev *dev)
 {
-	do {
-		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-			return &xform->auth;
-
-		xform = xform->next;
-	} while (xform);
-
-	return NULL;
 }
 
-static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
-{
-	do {
-		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return &xform->cipher;
-
-		xform = xform->next;
-	} while (xform);
-
-	return NULL;
-}
-void *
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
-		struct rte_crypto_sym_xform *xform, void *session_private)
+int
+qat_cryptodev_close(struct rte_cryptodev *dev)
 {
-	struct qat_pmd_private *internals = dev->data->dev_private;
-
-	struct qat_session *session = session_private;
-
-	struct rte_crypto_cipher_xform *cipher_xform = NULL;
-
-	/* Get cipher xform from crypto xform chain */
-	cipher_xform = qat_get_cipher_xform(xform);
+	int i, ret;
 
-	switch (cipher_xform->algo) {
-	case RTE_CRYPTO_CIPHER_AES_CBC:
-		if (qat_alg_validate_aes_key(cipher_xform->key.length,
-				&session->qat_cipher_alg) != 0) {
-			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
-			goto error_out;
-		}
-		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_AES_GCM:
-		if (qat_alg_validate_aes_key(cipher_xform->key.length,
-				&session->qat_cipher_alg) != 0) {
-			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
-			goto error_out;
-		}
-		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_AES_CTR:
-		if (qat_alg_validate_aes_key(cipher_xform->key.length,
-				&session->qat_cipher_alg) != 0) {
-			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
-			goto error_out;
-		}
-		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
-		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
-				&session->qat_cipher_alg) != 0) {
-			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
-			goto error_out;
-		}
-		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_NULL:
-		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_KASUMI_F8:
-		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
-				&session->qat_cipher_alg) != 0) {
-			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
-			goto error_out;
-		}
-		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_3DES_CBC:
-		if (qat_alg_validate_3des_key(cipher_xform->key.length,
-				&session->qat_cipher_alg) != 0) {
-			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
-			goto error_out;
-		}
-		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_3DES_CTR:
-		if (qat_alg_validate_3des_key(cipher_xform->key.length,
-				&session->qat_cipher_alg) != 0) {
-			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
-			goto error_out;
-		}
-		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-		break;
-	case RTE_CRYPTO_CIPHER_3DES_ECB:
-	case RTE_CRYPTO_CIPHER_AES_ECB:
-	case RTE_CRYPTO_CIPHER_AES_CCM:
-	case RTE_CRYPTO_CIPHER_AES_F8:
-	case RTE_CRYPTO_CIPHER_AES_XTS:
-	case RTE_CRYPTO_CIPHER_ARC4:
-	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
-		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
-				cipher_xform->algo);
-		goto error_out;
-	default:
-		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
-				cipher_xform->algo);
-		goto error_out;
+	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+		ret = dev->dev_ops->queue_pair_release(dev, i);
+		if (ret < 0)
+			return ret;
 	}
 
-	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
-		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
-	else
-		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-
-	if (qat_alg_aead_session_create_content_desc_cipher(session,
-						cipher_xform->key.data,
-						cipher_xform->key.length))
-		goto error_out;
-
-	return session;
-
-error_out:
-	rte_mempool_put(internals->sess_mp, session);
-	return NULL;
+	return 0;
 }
 
-
-void *
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
-		struct rte_crypto_sym_xform *xform, void *session_private)
+void
+qat_cryptodev_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *info)
 {
-	struct qat_pmd_private *internals = dev->data->dev_private;
-
-	struct qat_session *session = session_private;
-
-	int qat_cmd_id;
+	struct qat_cryptodev_private *qat_private = dev->data->dev_private;
+	struct qat_pci_device *qat_dev = qat_private->qat_dev;
+	enum qat_service_type service_type = qat_private->service_type;
 
-	PMD_INIT_FUNC_TRACE();
+	if (info != NULL) {
+		info->max_nb_queue_pairs =
+			qat_qps_per_service(qat_dev, service_type);
+		info->feature_flags = dev->feature_flags;
+		info->capabilities = qat_private->qat_dev_capabilities;
+		if (service_type == QAT_SERVICE_ASYMMETRIC)
+			info->driver_id = qat_asym_driver_id;
 
-	/* Get requested QAT command id */
-	qat_cmd_id = qat_get_cmd_id(xform);
-	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
-		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
-		goto error_out;
-	}
-	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
-	switch (session->qat_cmd) {
-	case ICP_QAT_FW_LA_CMD_CIPHER:
-	session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
-		break;
-	case ICP_QAT_FW_LA_CMD_AUTH:
-	session = qat_crypto_sym_configure_session_auth(dev, xform, session);
-		break;
-	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
-	session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
-	session = qat_crypto_sym_configure_session_auth(dev, xform, session);
-		break;
-	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
-	session = qat_crypto_sym_configure_session_auth(dev, xform, session);
-	session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
-		break;
-	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
-	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
-	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
-	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
-	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
-	case ICP_QAT_FW_LA_CMD_MGF1:
-	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
-	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
-	case ICP_QAT_FW_LA_CMD_DELIMITER:
-		PMD_DRV_LOG(ERR, "Unsupported Service %u",
-				session->qat_cmd);
-		goto error_out;
-	default:
-		PMD_DRV_LOG(ERR, "Unsupported Service %u",
-				session->qat_cmd);
-		goto error_out;
+		if (service_type == QAT_SERVICE_SYMMETRIC)
+			info->driver_id = qat_sym_driver_id;
+		/* No limit of number of sessions */
+		info->sym.max_nb_sessions = 0;
 	}
-	return session;
-
-error_out:
-	rte_mempool_put(internals->sess_mp, session);
-	return NULL;
 }
 
-struct qat_session *
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
-				struct rte_crypto_sym_xform *xform,
-				struct qat_session *session_private)
+void
+qat_cryptodev_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
 {
-
+	struct qat_common_stats qat_stats = {0};
+	struct qat_cryptodev_private *qat_priv;
 
-	struct qat_pmd_private *internals = dev->data->dev_private;
-	struct qat_session *session = session_private;
-	struct rte_crypto_auth_xform *auth_xform = NULL;
-	struct rte_crypto_cipher_xform *cipher_xform = NULL;
-	auth_xform = qat_get_auth_xform(xform);
-
-	switch (auth_xform->algo) {
-	case RTE_CRYPTO_AUTH_SHA1_HMAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
-		break;
-	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
-		break;
-	case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
-		break;
-	case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
-		break;
-	case RTE_CRYPTO_AUTH_SHA512_HMAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
-		break;
-	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
-		break;
-	case RTE_CRYPTO_AUTH_AES_GCM:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
-		break;
-	case RTE_CRYPTO_AUTH_AES_GMAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
-		break;
-	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
-		break;
-	case RTE_CRYPTO_AUTH_MD5_HMAC:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
-		break;
-	case RTE_CRYPTO_AUTH_NULL:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
-		break;
-	case RTE_CRYPTO_AUTH_KASUMI_F9:
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
-		break;
-	case RTE_CRYPTO_AUTH_SHA1:
-	case RTE_CRYPTO_AUTH_SHA256:
-	case RTE_CRYPTO_AUTH_SHA512:
-	case RTE_CRYPTO_AUTH_SHA224:
-	case RTE_CRYPTO_AUTH_SHA384:
-	case RTE_CRYPTO_AUTH_MD5:
-	case RTE_CRYPTO_AUTH_AES_CCM:
-	case RTE_CRYPTO_AUTH_AES_CMAC:
-	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
-	case RTE_CRYPTO_AUTH_ZUC_EIA3:
-		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
-				auth_xform->algo);
-		goto error_out;
-	default:
-		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
-				auth_xform->algo);
-		goto error_out;
-	}
-	cipher_xform = qat_get_cipher_xform(xform);
-
-	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
-			(session->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
-		if (qat_alg_aead_session_create_content_desc_auth(session,
-				cipher_xform->key.data,
-				cipher_xform->key.length,
-				auth_xform->add_auth_data_length,
-				auth_xform->digest_length,
-				auth_xform->op))
-			goto error_out;
-	} else {
-		if (qat_alg_aead_session_create_content_desc_auth(session,
-				auth_xform->key.data,
-				auth_xform->key.length,
-				auth_xform->add_auth_data_length,
-				auth_xform->digest_length,
-				auth_xform->op))
-			goto error_out;
+	if (stats == NULL || dev == NULL) {
+		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+		return;
 	}
-	return session;
-
-error_out:
-	if (internals->sess_mp != NULL)
-		rte_mempool_put(internals->sess_mp, session);
-	return NULL;
-}
+	qat_priv = dev->data->dev_private;
 
-unsigned qat_crypto_sym_get_session_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
+	qat_stats_get(qat_priv->qat_dev, &qat_stats, qat_priv->service_type);
+	stats->enqueued_count = qat_stats.enqueued_count;
+	stats->dequeued_count = qat_stats.dequeued_count;
+	stats->enqueue_err_count = qat_stats.enqueue_err_count;
+	stats->dequeue_err_count = qat_stats.dequeue_err_count;
 }
 
-
-uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
+void
+qat_cryptodev_stats_reset(struct rte_cryptodev *dev)
 {
-	register struct qat_queue *queue;
-	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-	register uint32_t nb_ops_sent = 0;
-	register struct rte_crypto_op **cur_op = ops;
-	register int ret;
-	uint16_t nb_ops_possible = nb_ops;
-	register uint8_t *base_addr;
-	register uint32_t tail;
-	int overflow;
-
-	if (unlikely(nb_ops == 0))
-		return 0;
+	struct qat_cryptodev_private *qat_priv;
 
-	/* read params used a lot in main loop into registers */
-	queue = &(tmp_qp->tx_q);
-	base_addr = (uint8_t *)queue->base_addr;
-	tail = queue->tail;
-
-	/* Find how many can actually fit on the ring */
-	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
-				- queue->max_inflights;
-	if (overflow > 0) {
-		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
-		nb_ops_possible = nb_ops - overflow;
-		if (nb_ops_possible == 0)
-			return 0;
+	if (dev == NULL) {
+		QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
+		return;
 	}
+	qat_priv = dev->data->dev_private;
 
-	while (nb_ops_sent != nb_ops_possible) {
-		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
-		if (ret != 0) {
-			tmp_qp->stats.enqueue_err_count++;
-			if (nb_ops_sent == 0)
-				return 0;
-			goto kick_tail;
-		}
+	qat_stats_reset(qat_priv->qat_dev, qat_priv->service_type);
 
-		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
-		nb_ops_sent++;
-		cur_op++;
-	}
-kick_tail:
-	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
-			queue->hw_queue_number, tail);
-	queue->tail = tail;
-	tmp_qp->stats.enqueued_count += nb_ops_sent;
-	return nb_ops_sent;
 }
 
-
-uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
+int
+qat_cryptodev_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 {
-	struct qat_queue *queue;
-	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-	uint32_t msg_counter = 0;
-	struct rte_crypto_op *rx_op;
-	struct icp_qat_fw_comn_resp *resp_msg;
+	struct qat_cryptodev_private *qat_private = dev->data->dev_private;
+	struct qat_pci_device *qat_dev = qat_private->qat_dev;
+	enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen;
+	enum qat_service_type service_type = qat_private->service_type;
 
-	queue = &(tmp_qp->rx_q);
-	resp_msg = (struct icp_qat_fw_comn_resp *)
-			((uint8_t *)queue->base_addr + queue->head);
+	QAT_LOG(DEBUG, "Release %s qp %u on device %d",
+			qat_service_get_str(service_type),
+			queue_pair_id, dev->data->dev_id);
 
-	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
-			msg_counter != nb_ops) {
-		rx_op = (struct rte_crypto_op *)(uintptr_t)
-				(resp_msg->opaque_data);
+	qat_private->qat_dev->qps_in_use[service_type][queue_pair_id] = NULL;
 
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
-		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
-				sizeof(struct icp_qat_fw_comn_resp));
-#endif
-		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
-				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
-					resp_msg->comn_hdr.comn_status)) {
-			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else {
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-		}
-		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
-		queue->head = adf_modulo(queue->head +
-				queue->msg_size,
-				ADF_RING_SIZE_MODULO(queue->queue_size));
-		resp_msg = (struct icp_qat_fw_comn_resp *)
-				((uint8_t *)queue->base_addr +
-						queue->head);
-		*ops = rx_op;
-		ops++;
-		msg_counter++;
-	}
-	if (msg_counter > 0) {
-		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
-					queue->hw_bundle_number,
-					queue->hw_queue_number, queue->head);
-		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
-		tmp_qp->stats.dequeued_count += msg_counter;
-	}
-	return msg_counter;
+	return qat_qp_release(qat_dev_gen, (struct qat_qp **)
+			&(dev->data->queue_pairs[queue_pair_id]));
 }
 
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+int
+qat_cryptodev_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
 {
-	struct qat_session *ctx;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct qat_qp **qp_addr =
+		(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+	struct qat_cryptodev_private *qat_private = dev->data->dev_private;
+	struct qat_pci_device *qat_dev = qat_private->qat_dev;
+	enum qat_service_type service_type = qat_private->service_type;
+	struct qat_qp_config qat_qp_conf = {0};
+	struct qat_qp *qp;
+	int ret = 0;
+	uint32_t i;
 
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+	/* If qp is already in use free ring memory and qp metadata. */
+	if (*qp_addr != NULL) {
+		ret = dev->dev_ops->queue_pair_release(dev, qp_id);
+		if (ret < 0)
+			return -EBUSY;
 	}
-#endif
-	if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
-		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
+	if (qp_id >= qat_qps_per_service(qat_dev, service_type)) {
+		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
 		return -EINVAL;
 	}
-	if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
-		PMD_DRV_LOG(ERR, "Session was not created for this device");
+
+	qat_qp_conf.hw = qat_qp_get_hw_data(qat_dev, service_type,
+			qp_id);
+	if (qat_qp_conf.hw == NULL) {
+		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
 		return -EINVAL;
 	}
-	ctx = (struct qat_session *)op->sym->session->_private;
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-				rte_pktmbuf_data_len(op->sym->m_src);
-
-	qat_req->comn_mid.dest_data_addr =
-		qat_req->comn_mid.src_data_addr =
-			    rte_pktmbuf_mtophys(op->sym->m_src);
-
-	if (unlikely(op->sym->m_dst != NULL)) {
-		qat_req->comn_mid.dest_data_addr =
-				rte_pktmbuf_mtophys(op->sym->m_dst);
-		qat_req->comn_mid.dst_length =
-				rte_pktmbuf_data_len(op->sym->m_dst);
-	}
-
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
-
-	cipher_param->cipher_length = op->sym->cipher.data.length;
-	cipher_param->cipher_offset = op->sym->cipher.data.offset;
-	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
-		if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
-				(cipher_param->cipher_offset
-					% BYTE_LENGTH != 0))) {
-			PMD_DRV_LOG(ERR, " For SNOW 3G/KASUMI, QAT PMD only "
-					"supports byte aligned values");
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			return -EINVAL;
-		}
-		cipher_param->cipher_length >>= 3;
-		cipher_param->cipher_offset >>= 3;
-	}
-
-	if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
-			sizeof(cipher_param->u.cipher_IV_array))) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				op->sym->cipher.iv.data,
-				op->sym->cipher.iv.length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
-	}
-	if (op->sym->auth.digest.phys_addr) {
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
-	}
-	auth_param->auth_off = op->sym->auth.data.offset;
-	auth_param->auth_len = op->sym->auth.data.length;
-	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
-		if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
-				(auth_param->auth_len % BYTE_LENGTH != 0))) {
-			PMD_DRV_LOG(ERR, " For SNOW 3G, QAT PMD only "
-					"supports byte aligned values");
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			return -EINVAL;
-		}
-		auth_param->auth_off >>= 3;
-		auth_param->auth_len >>= 3;
-	}
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
-		auth_param->auth_len = (auth_param->auth_len >> 3)
-				+ (auth_param->auth_off >> 3)
-				+ (BYTE_LENGTH >> 3)
-				- 8;
-		auth_param->auth_off = 8;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
-		auth_param->auth_len = (auth_param->auth_len >> 3)
-				+ (auth_param->auth_off >> 3)
-				+ (BYTE_LENGTH >> 3);
-		auth_param->auth_off = 0;
-	}
-	auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
-
-	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-		if (op->sym->cipher.iv.length == 12) {
-			/*
-			 * For GCM a 12 bit IV is allowed,
-			 * but we need to inform the f/w
-			 */
-			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		}
-		if (op->sym->cipher.data.length == 0) {
-			/*
-			 * GMAC
-			 */
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-					op->sym->auth.aad.phys_addr;
-			auth_param->u1.aad_adr = 0;
-			auth_param->auth_len = op->sym->auth.aad.length;
-			auth_param->u2.aad_sz = 0;
-
-		}
-
-	}
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
-	rte_hexdump(stdout, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	rte_hexdump(stdout, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
-			op->sym->cipher.iv.length);
-	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
-			op->sym->auth.digest.length);
-	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
-			op->sym->auth.aad.length);
-#endif
-	return 0;
-}
-
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
-{
-	uint32_t div = data >> shift;
-	uint32_t mult = div << shift;
-
-	return data - mult;
-}
-
-void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
-{
-	struct rte_cryptodev_sym_session *sess = sym_sess;
-	struct qat_session *s = (void *)sess->_private;
-
-	PMD_INIT_FUNC_TRACE();
-	s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
-		offsetof(struct qat_session, cd) +
-		offsetof(struct rte_cryptodev_sym_session, _private);
-}
-
-int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
-{
-	PMD_INIT_FUNC_TRACE();
-	return -ENOTSUP;
-}
-
-int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
-{
-	PMD_INIT_FUNC_TRACE();
-	return 0;
-}
-
-void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
-int qat_dev_close(struct rte_cryptodev *dev)
-{
-	int i, ret;
-
-	PMD_INIT_FUNC_TRACE();
-
-	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
-		ret = qat_crypto_sym_qp_release(dev, i);
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
-}
-
-void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
-			struct rte_cryptodev_info *info)
-{
-	struct qat_pmd_private *internals = dev->data->dev_private;
+	qat_qp_conf.cookie_size = service_type == QAT_SERVICE_SYMMETRIC ?
+			sizeof(struct qat_sym_op_cookie) :
+			sizeof(struct qat_asym_op_cookie);
+	qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
+	qat_qp_conf.socket_id = socket_id;
+	qat_qp_conf.service_str = qat_service_get_str(service_type);
 
-	PMD_INIT_FUNC_TRACE();
-	if (info != NULL) {
-		info->max_nb_queue_pairs =
-				ADF_NUM_SYM_QPS_PER_BUNDLE *
-				ADF_NUM_BUNDLES_PER_DEV;
-		info->feature_flags = dev->feature_flags;
-		info->capabilities = qat_pmd_capabilities;
-		info->sym.max_nb_sessions = internals->max_nb_sessions;
-		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
-	}
-}
+	ret = qat_qp_setup(qat_dev, qp_addr, qp_id, &qat_qp_conf);
+	if (ret != 0)
+		return ret;
 
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	int i;
-	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
+	/* store a link to the qp in the qat_pci_device */
+	qat_dev->qps_in_use[service_type][qp_id] = *qp_addr;
 
-	PMD_INIT_FUNC_TRACE();
-	if (stats == NULL) {
-		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
-		return;
-	}
-	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
-		if (qp[i] == NULL) {
-			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
-			continue;
-		}
+	qp = (struct qat_qp *)*qp_addr;
+	qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold;
 
-		stats->enqueued_count += qp[i]->stats.enqueued_count;
-		stats->dequeued_count += qp[i]->stats.enqueued_count;
-		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
-		stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
+	for (i = 0; i < qp->nb_descriptors; i++) {
+		if (service_type == QAT_SERVICE_SYMMETRIC)
+			qat_sym_init_op_cookie(qp->op_cookies[i]);
+		else
+			qat_asym_init_op_cookie(qp->op_cookies[i]);
 	}
-}
-
 
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
-{
-	int i;
-	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-	PMD_INIT_FUNC_TRACE();
-	for (i = 0; i < dev->data->nb_queue_pairs; i++)
-		memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
-	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
+	return ret;
 }
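
For reference, the driver ops retained above are not called directly by applications; they are reached through the public rte_cryptodev API. The following is a minimal sketch of exercising the configure/qp-setup/start/stats paths from an application. The device id 0, the single queue pair, and the descriptor count 4096 are illustrative assumptions, not values taken from this patch.

	#include <rte_cryptodev.h>

	static int
	setup_one_qat_cryptodev(uint8_t dev_id, int socket_id)
	{
		/* Routed to qat_cryptodev_config(), which accepts any config. */
		struct rte_cryptodev_config conf = {
			.socket_id = socket_id,
			.nb_queue_pairs = 1,
		};
		/* nb_descriptors becomes qat_qp_conf.nb_descriptors in
		 * qat_cryptodev_qp_setup(), which also inits one op cookie
		 * per descriptor (sym or asym, depending on service type).
		 */
		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = 4096,
		};
		struct rte_cryptodev_stats stats;
		int ret;

		ret = rte_cryptodev_configure(dev_id, &conf);
		if (ret < 0)
			return ret;

		ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
				socket_id);
		if (ret < 0)
			return ret;

		ret = rte_cryptodev_start(dev_id);	/* qat_cryptodev_start() */
		if (ret < 0)
			return ret;

		/* Serviced by qat_cryptodev_stats_get() via qat_stats_get(). */
		rte_cryptodev_stats_get(dev_id, &stats);
		return 0;
	}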