X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_crypto.c;h=ec481ae04b530bbe541ed3d63a67bb1bc94d2cba;hb=46a0547f9f2a07169c7285e098539ccc7c003dcc;hp=5969ba6f0354d068e824908cbe46e038bdbf17ca;hpb=2fa64f840d65afb0ba92c39c0b1b74359ed669f8;p=dpdk.git diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index 5969ba6f03..ec481ae04b 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2017 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -59,6 +59,9 @@ #include #include #include +#include +#include +#include #include "qat_logs.h" #include "qat_algs.h" @@ -67,417 +70,169 @@ #define BYTE_LENGTH 8 -static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = { - { /* SHA1 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .block_size = 64, - .key_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - .digest_size = { - .min = 20, - .max = 20, - .increment = 0 - }, - .aad_size = { 0 } - }, } - }, } - }, - { /* SHA224 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, - .block_size = 64, - .key_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - .digest_size = { - .min = 28, - .max = 28, - .increment = 0 - }, - .aad_size = { 0 } - }, } - }, } - }, - { /* SHA256 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, - .block_size = 64, - .key_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - .digest_size = { - .min = 32, - .max = 32, - .increment = 0 - }, - .aad_size = { 0 } - }, } - }, } - }, - { /* SHA384 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, - .block_size = 64, - .key_size = { - .min = 128, - .max = 128, - .increment = 0 - }, - .digest_size = { - .min = 48, - .max = 48, - .increment = 0 - }, - .aad_size = { 0 } - }, } - }, } - }, - { /* SHA512 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, - .block_size = 128, - .key_size = { - .min = 128, - .max = 128, - .increment = 0 - }, - .digest_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - .aad_size = { 0 } - }, } - }, } - }, - { /* MD5 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_MD5_HMAC, - .block_size = 64, - .key_size = { - .min = 8, - .max = 64, - .increment = 8 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .aad_size = { 0 } - }, } - }, } - }, - { /* AES XCBC MAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, - .block_size = 16, - .key_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .aad_size = { 0 } - }, } - }, } - }, - { /* AES GCM (AUTH) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - 
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_AES_GCM, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .digest_size = { - .min = 8, - .max = 16, - .increment = 4 - }, - .aad_size = { - .min = 8, - .max = 12, - .increment = 4 - } - }, } - }, } - }, - { /* AES GMAC (AUTH) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_AES_GMAC, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .digest_size = { - .min = 8, - .max = 16, - .increment = 4 - }, - .aad_size = { - .min = 1, - .max = 65535, - .increment = 1 - } - }, } - }, } - }, - { /* SNOW3G (UIA2) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, - .block_size = 16, - .key_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .digest_size = { - .min = 4, - .max = 4, - .increment = 0 - }, - .aad_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* AES GCM (CIPHER) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_AES_GCM, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* AES CBC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_AES_CBC, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* SNOW3G (UEA2) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, - .block_size = 16, - .key_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* AES CTR */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_AES_CTR, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* NULL (AUTH) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_NULL, - .block_size = 1, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .aad_size = { 0 } - }, }, - }, }, - }, - { /* NULL (CIPHER) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_NULL, - .block_size = 1, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .iv_size = { - .min = 0, - .max = 0, - .increment = 0 - } - }, }, - }, } - }, - { /* KASUMI (F8) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, - .block_size = 8, - .key_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .iv_size = { - .min = 8, - .max = 8, - .increment = 0 - } - }, } - }, } - }, - { /* KASUMI (F9) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - 
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_KASUMI_F9, - .block_size = 8, - .key_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .digest_size = { - .min = 4, - .max = 4, - .increment = 0 - }, - .aad_size = { - .min = 8, - .max = 8, - .increment = 0 - } - }, } - }, } - }, - RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() -}; +static int +qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo, + struct qat_pmd_private *internals) { + int i = 0; + const struct rte_cryptodev_capabilities *capability; + + while ((capability = &(internals->qat_dev_capabilities[i++]))->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + + if (capability->sym.cipher.algo == algo) + return 1; + } + return 0; +} + +static int +qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo, + struct qat_pmd_private *internals) { + int i = 0; + const struct rte_cryptodev_capabilities *capability; + + while ((capability = &(internals->qat_dev_capabilities[i++]))->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + + if (capability->sym.auth.algo == algo) + return 1; + } + return 0; +} + +/** Encrypt a single partial block + * Depends on openssl libcrypto + * Uses ECB+XOR to do CFB encryption, same result, more performant + */ +static inline int +bpi_cipher_encrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[16]; + int i; + + /* ECB method: encrypt the IV, then XOR this with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_encrypt_err; + + for (i = 0; i < srclen; i++) + *(dst+i) = *(src+i)^(encrypted_iv[i]); + + return 0; + +cipher_encrypt_err: + PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed"); + return -EINVAL; +} + +/** Decrypt a single partial block + * Depends on openssl libcrypto + * Uses ECB+XOR to do CFB encryption, same result, more performant + */ +static inline int +bpi_cipher_decrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[16]; + int i; + + /* ECB method: encrypt (not decrypt!) 
the IV, then XOR with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_decrypt_err; + + for (i = 0; i < srclen; i++) + *(dst+i) = *(src+i)^(encrypted_iv[i]); + + return 0; + +cipher_decrypt_err: + PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed"); + return -EINVAL; +} + +/** Creates a context in either AES or DES in ECB mode + * Depends on openssl libcrypto + */ +static void * +bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo, + enum rte_crypto_cipher_operation direction __rte_unused, + uint8_t *key) +{ + const EVP_CIPHER *algo = NULL; + EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new(); + + if (ctx == NULL) + goto ctx_init_err; + + if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) + algo = EVP_des_ecb(); + else + algo = EVP_aes_128_ecb(); + + /* IV will be ECB encrypted whether direction is encrypt or decrypt*/ + if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1) + goto ctx_init_err; + + return ctx; + +ctx_init_err: + if (ctx != NULL) + EVP_CIPHER_CTX_free(ctx); + return NULL; +} + +/** Frees a context previously created + * Depends on openssl libcrypto + */ +static void +bpi_cipher_ctx_free(void *bpi_ctx) +{ + if (bpi_ctx != NULL) + EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx); +} static inline uint32_t adf_modulo(uint32_t data, uint32_t shift); static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg); +qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg, + struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp); -void qat_crypto_sym_clear_session(struct rte_cryptodev *dev, - void *session) +void +qat_crypto_sym_clear_session(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) { - struct qat_session *sess = session; - phys_addr_t cd_paddr; - PMD_INIT_FUNC_TRACE(); - if (session) { - cd_paddr = sess->cd_paddr; - memset(sess, 0, qat_crypto_sym_get_session_private_size(dev)); - sess->cd_paddr = cd_paddr; - } else - PMD_DRV_LOG(ERR, "NULL session"); + uint8_t index = dev->driver_id; + void *sess_priv = get_session_private_data(sess, index); + struct qat_session *s = (struct qat_session *)sess_priv; + + if (sess_priv) { + if (s->bpi_ctx) + bpi_cipher_ctx_free(s->bpi_ctx); + memset(s, 0, qat_crypto_sym_get_session_private_size(dev)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + set_session_private_data(sess, index, NULL); + rte_mempool_put(sess_mp, sess_priv); + } } static int @@ -491,6 +246,14 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform) if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL) return ICP_QAT_FW_LA_CMD_AUTH; + /* AEAD */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) + return ICP_QAT_FW_LA_CMD_CIPHER_HASH; + else + return ICP_QAT_FW_LA_CMD_HASH_CIPHER; + } + if (xform->next == NULL) return -1; @@ -536,15 +299,16 @@ void * qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *session_private) { - struct qat_pmd_private *internals = dev->data->dev_private; - struct qat_session *session = session_private; - + struct qat_pmd_private *internals = dev->data->dev_private; struct rte_crypto_cipher_xform *cipher_xform = NULL; /* Get cipher xform from crypto xform chain */ cipher_xform = qat_get_cipher_xform(xform); + session->cipher_iv.offset = cipher_xform->iv.offset; + session->cipher_iv.length = cipher_xform->iv.length; + switch (cipher_xform->algo) { case 
RTE_CRYPTO_CIPHER_AES_CBC: if (qat_alg_validate_aes_key(cipher_xform->key.length, @@ -554,14 +318,6 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, } session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; break; - case RTE_CRYPTO_CIPHER_AES_GCM: - if (qat_alg_validate_aes_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES cipher key size"); - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - break; case RTE_CRYPTO_CIPHER_AES_CTR: if (qat_alg_validate_aes_key(cipher_xform->key.length, &session->qat_cipher_alg) != 0) { @@ -573,7 +329,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: if (qat_alg_validate_snow3g_key(cipher_xform->key.length, &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size"); + PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size"); goto error_out; } session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; @@ -589,11 +345,83 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, } session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE; break; - case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_3DES_CBC: + if (qat_alg_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_CBC: + if (qat_alg_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_CTR: + if (qat_alg_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: + session->bpi_ctx = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data); + if (session->bpi_ctx == NULL) { + PMD_DRV_LOG(ERR, "failed to create DES BPI ctx"); + goto error_out; + } + if (qat_alg_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: + session->bpi_ctx = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data); + if (session->bpi_ctx == NULL) { + PMD_DRV_LOG(ERR, "failed to create AES BPI ctx"); + goto error_out; + } + if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_ZUC_EEA3: + if (!qat_is_cipher_alg_supported( + cipher_xform->algo, internals)) { + PMD_DRV_LOG(ERR, "%s not supported on this device", + rte_crypto_cipher_algorithm_strings + [cipher_xform->algo]); + goto error_out; + } + if (qat_alg_validate_zuc_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: - case RTE_CRYPTO_CIPHER_AES_CCM: - PMD_DRV_LOG(ERR, "Crypto: Unsupported 
Cipher alg %u", + case RTE_CRYPTO_CIPHER_AES_F8: + case RTE_CRYPTO_CIPHER_AES_XTS: + case RTE_CRYPTO_CIPHER_ARC4: + PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u", cipher_xform->algo); goto error_out; default: @@ -615,23 +443,57 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, return session; error_out: - rte_mempool_put(internals->sess_mp, session); + if (session->bpi_ctx) { + bpi_cipher_ctx_free(session->bpi_ctx); + session->bpi_ctx = NULL; + } return NULL; } - -void * +int qat_crypto_sym_configure_session(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, void *session_private) + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool) { - struct qat_pmd_private *internals = dev->data->dev_private; + void *sess_private_data; + + if (rte_mempool_get(mempool, &sess_private_data)) { + CDEV_LOG_ERR( + "Couldn't get object from session mempool"); + return -1; + } + if (qat_crypto_set_session_parameters(dev, xform, sess_private_data) != 0) { + PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure " + "session parameters"); + + /* Return session to mempool */ + rte_mempool_put(mempool, sess_private_data); + return -1; + } + + set_session_private_data(sess, dev->driver_id, + sess_private_data); + + return 0; +} + +int +qat_crypto_set_session_parameters(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, void *session_private) +{ struct qat_session *session = session_private; int qat_cmd_id; - PMD_INIT_FUNC_TRACE(); + /* Set context descriptor physical address */ + session->cd_paddr = rte_mempool_virt2phy(NULL, session) + + offsetof(struct qat_session, cd); + + session->min_qat_dev_gen = QAT_GEN1; + /* Get requested QAT command id */ qat_cmd_id = qat_get_cmd_id(xform); if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) { @@ -647,12 +509,26 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev, session = qat_crypto_sym_configure_session_auth(dev, xform, session); break; case ICP_QAT_FW_LA_CMD_CIPHER_HASH: - session = qat_crypto_sym_configure_session_cipher(dev, xform, session); - session = qat_crypto_sym_configure_session_auth(dev, xform, session); + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + session = qat_crypto_sym_configure_session_aead(xform, + session); + else { + session = qat_crypto_sym_configure_session_cipher(dev, + xform, session); + session = qat_crypto_sym_configure_session_auth(dev, + xform, session); + } break; case ICP_QAT_FW_LA_CMD_HASH_CIPHER: - session = qat_crypto_sym_configure_session_auth(dev, xform, session); - session = qat_crypto_sym_configure_session_cipher(dev, xform, session); + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + session = qat_crypto_sym_configure_session_aead(xform, + session); + else { + session = qat_crypto_sym_configure_session_auth(dev, + xform, session); + session = qat_crypto_sym_configure_session_cipher(dev, + xform, session); + } break; case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM: case ICP_QAT_FW_LA_CMD_TRNG_TEST: @@ -671,11 +547,11 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev, session->qat_cmd); goto error_out; } - return session; + + return 0; error_out: - rte_mempool_put(internals->sess_mp, session); - return NULL; + return -1; } struct qat_session * @@ -684,11 +560,12 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, struct qat_session *session_private) { - struct qat_pmd_private *internals = dev->data->dev_private; struct qat_session *session = session_private; struct 
rte_crypto_auth_xform *auth_xform = NULL; - struct rte_crypto_cipher_xform *cipher_xform = NULL; + struct qat_pmd_private *internals = dev->data->dev_private; auth_xform = qat_get_auth_xform(xform); + uint8_t *key_data = auth_xform->key.data; + uint8_t key_length = auth_xform->key.length; switch (auth_xform->algo) { case RTE_CRYPTO_AUTH_SHA1_HMAC: @@ -709,11 +586,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_AES_XCBC_MAC: session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC; break; - case RTE_CRYPTO_AUTH_AES_GCM: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; - break; case RTE_CRYPTO_AUTH_AES_GMAC: + if (qat_alg_validate_aes_key(auth_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid AES key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; + break; case RTE_CRYPTO_AUTH_SNOW3G_UIA2: session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2; @@ -727,16 +608,23 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_KASUMI_F9: session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9; break; + case RTE_CRYPTO_AUTH_ZUC_EIA3: + if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) { + PMD_DRV_LOG(ERR, "%s not supported on this device", + rte_crypto_auth_algorithm_strings + [auth_xform->algo]); + goto error_out; + } + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3; + break; case RTE_CRYPTO_AUTH_SHA1: case RTE_CRYPTO_AUTH_SHA256: case RTE_CRYPTO_AUTH_SHA512: case RTE_CRYPTO_AUTH_SHA224: case RTE_CRYPTO_AUTH_SHA384: case RTE_CRYPTO_AUTH_MD5: - case RTE_CRYPTO_AUTH_AES_CCM: case RTE_CRYPTO_AUTH_AES_CMAC: case RTE_CRYPTO_AUTH_AES_CBC_MAC: - case RTE_CRYPTO_AUTH_ZUC_EIA3: PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u", auth_xform->algo); goto error_out; @@ -745,32 +633,145 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, auth_xform->algo); goto error_out; } - cipher_xform = qat_get_cipher_xform(xform); - if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) || - (session->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) { + session->auth_iv.offset = auth_xform->iv.offset; + session->auth_iv.length = auth_xform->iv.length; + + if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) { + if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) { + session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH; + session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + /* + * It needs to create cipher desc content first, + * then authentication + */ + if (qat_alg_aead_session_create_content_desc_cipher(session, + auth_xform->key.data, + auth_xform->key.length)) + goto error_out; + + if (qat_alg_aead_session_create_content_desc_auth(session, + key_data, + key_length, + 0, + auth_xform->digest_length, + auth_xform->op)) + goto error_out; + } else { + session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER; + session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; + /* + * It needs to create authentication desc content first, + * then cipher + */ + if (qat_alg_aead_session_create_content_desc_auth(session, + key_data, + key_length, + 0, + auth_xform->digest_length, + auth_xform->op)) + goto error_out; + + if (qat_alg_aead_session_create_content_desc_cipher(session, + auth_xform->key.data, + auth_xform->key.length)) + goto error_out; + } + /* Restore to authentication only only */ + session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH; + } else { if (qat_alg_aead_session_create_content_desc_auth(session, - 
cipher_xform->key.data, - cipher_xform->key.length, - auth_xform->add_auth_data_length, + key_data, + key_length, + 0, auth_xform->digest_length, auth_xform->op)) goto error_out; + } + + session->digest_length = auth_xform->digest_length; + return session; + +error_out: + return NULL; +} + +struct qat_session * +qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform, + struct qat_session *session_private) +{ + struct qat_session *session = session_private; + struct rte_crypto_aead_xform *aead_xform = &xform->aead; + + /* + * Store AEAD IV parameters as cipher IV, + * to avoid unnecessary memory usage + */ + session->cipher_iv.offset = xform->aead.iv.offset; + session->cipher_iv.length = xform->aead.iv.length; + + switch (aead_xform->algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + if (qat_alg_validate_aes_key(aead_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid AES key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; + break; + case RTE_CRYPTO_AEAD_AES_CCM: + PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u", + aead_xform->algo); + goto error_out; + default: + PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n", + aead_xform->algo); + goto error_out; + } + + if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) { + session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + /* + * It needs to create cipher desc content first, + * then authentication + */ + if (qat_alg_aead_session_create_content_desc_cipher(session, + aead_xform->key.data, + aead_xform->key.length)) + goto error_out; + + if (qat_alg_aead_session_create_content_desc_auth(session, + aead_xform->key.data, + aead_xform->key.length, + aead_xform->aad_length, + aead_xform->digest_length, + RTE_CRYPTO_AUTH_OP_GENERATE)) + goto error_out; } else { + session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; + /* + * It needs to create authentication desc content first, + * then cipher + */ if (qat_alg_aead_session_create_content_desc_auth(session, - auth_xform->key.data, - auth_xform->key.length, - auth_xform->add_auth_data_length, - auth_xform->digest_length, - auth_xform->op)) + aead_xform->key.data, + aead_xform->key.length, + aead_xform->aad_length, + aead_xform->digest_length, + RTE_CRYPTO_AUTH_OP_VERIFY)) + goto error_out; + + if (qat_alg_aead_session_create_content_desc_cipher(session, + aead_xform->key.data, + aead_xform->key.length)) goto error_out; } + + session->digest_length = aead_xform->digest_length; return session; error_out: - if (internals->sess_mp != NULL) - rte_mempool_put(internals->sess_mp, session); return NULL; } @@ -780,6 +781,116 @@ unsigned qat_crypto_sym_get_session_private_size( return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8); } +static inline uint32_t +qat_bpicipher_preprocess(struct qat_session *ctx, + struct rte_crypto_op *op) +{ + uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = block_len > 0 ? 
+ sym_op->cipher.data.length % block_len : 0; + + if (last_block_len && + ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) { + + /* Decrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely(sym_op->m_dst != NULL)) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = last_block - block_len; + else + /* runt block, i.e. less than one full block */ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + ctx->cipher_iv.offset); + +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX + rte_hexdump(stdout, "BPI: src before pre-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst before pre-process:", dst, + last_block_len); +#endif + bpi_cipher_decrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX + rte_hexdump(stdout, "BPI: src after pre-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst after pre-process:", dst, + last_block_len); +#endif + } + + return sym_op->cipher.data.length - last_block_len; +} + +static inline uint32_t +qat_bpicipher_postprocess(struct qat_session *ctx, + struct rte_crypto_op *op) +{ + uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = block_len > 0 ? + sym_op->cipher.data.length % block_len : 0; + + if (last_block_len > 0 && + ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) { + + /* Encrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset; + + last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely(sym_op->m_dst != NULL)) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = dst - block_len; + else + /* runt block, i.e. 
less than one full block */ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + ctx->cipher_iv.offset); + +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX + rte_hexdump(stdout, "BPI: src before post-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst before post-process:", + dst, last_block_len); +#endif + bpi_cipher_encrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX + rte_hexdump(stdout, "BPI: src after post-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst after post-process:", dst, + last_block_len); +#endif + } + return sym_op->cipher.data.length - last_block_len; +} uint16_t qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, @@ -814,9 +925,16 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, } while (nb_ops_sent != nb_ops_possible) { - ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail); + ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail, + tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp); if (ret != 0) { tmp_qp->stats.enqueue_err_count++; + /* + * This message cannot be enqueued, + * decrease number of ops that wasn't sent + */ + rte_atomic16_sub(&tmp_qp->inflights16, + nb_ops_possible - nb_ops_sent); if (nb_ops_sent == 0) return 0; goto kick_tail; @@ -855,15 +973,24 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg, - sizeof(struct icp_qat_fw_comn_resp)); + sizeof(struct icp_qat_fw_comn_resp)); + #endif if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( resp_msg->comn_hdr.comn_status)) { rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; } else { + struct qat_session *sess = (struct qat_session *) + get_session_private_data( + rx_op->sym->session, + cryptodev_qat_driver_id); + + if (sess->bpi_ctx) + qat_bpicipher_postprocess(sess, rx_op); rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; } + *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG; queue->head = adf_modulo(queue->head + queue->msg_size, @@ -886,12 +1013,89 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, } static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg) +qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start, + struct qat_alg_buf_list *list, uint32_t data_len) +{ + int nr = 1; + + uint32_t buf_len = rte_pktmbuf_mtophys(buf) - + buff_start + rte_pktmbuf_data_len(buf); + + list->bufers[0].addr = buff_start; + list->bufers[0].resrvd = 0; + list->bufers[0].len = buf_len; + + if (data_len <= buf_len) { + list->num_bufs = nr; + list->bufers[0].len = data_len; + return 0; + } + + buf = buf->next; + while (buf) { + if (unlikely(nr == QAT_SGL_MAX_NUMBER)) { + PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL" + " entry(%u)", + QAT_SGL_MAX_NUMBER); + return -EINVAL; + } + + list->bufers[nr].len = rte_pktmbuf_data_len(buf); + list->bufers[nr].resrvd = 0; + list->bufers[nr].addr = rte_pktmbuf_mtophys(buf); + + buf_len += list->bufers[nr].len; + buf = buf->next; + + if (buf_len > data_len) { + list->bufers[nr].len -= + buf_len - data_len; + buf = NULL; + } + ++nr; + } + list->num_bufs = nr; + + return 0; +} + +static inline void +set_cipher_iv(uint16_t iv_length, uint16_t iv_offset, + struct icp_qat_fw_la_cipher_req_params *cipher_param, + struct rte_crypto_op *op, + struct icp_qat_fw_la_bulk_req *qat_req) +{ + /* copy IV into request if it fits */ + if (iv_length <= 
sizeof(cipher_param->u.cipher_IV_array)) { + rte_memcpy(cipher_param->u.cipher_IV_array, + rte_crypto_op_ctod_offset(op, uint8_t *, + iv_offset), + iv_length); + } else { + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_CIPH_IV_64BIT_PTR); + cipher_param->u.s.cipher_IV_ptr = + rte_crypto_op_ctophys_offset(op, + iv_offset); + } +} + +static inline int +qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg, + struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp) { + int ret = 0; struct qat_session *ctx; struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; register struct icp_qat_fw_la_bulk_req *qat_req; + uint8_t do_auth = 0, do_cipher = 0, do_aead = 0; + uint32_t cipher_len = 0, cipher_ofs = 0; + uint32_t auth_len = 0, auth_ofs = 0; + uint32_t min_ofs = 0; + uint64_t src_buf_start = 0, dst_buf_start = 0; + uint8_t do_sgl = 0; #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) { @@ -901,128 +1105,273 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg) return -EINVAL; } #endif - if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) { + if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented" " requests, op (%p) is sessionless.", op); return -EINVAL; } - if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) { + ctx = (struct qat_session *)get_session_private_data( + op->sym->session, cryptodev_qat_driver_id); + + if (unlikely(ctx == NULL)) { PMD_DRV_LOG(ERR, "Session was not created for this device"); return -EINVAL; } - ctx = (struct qat_session *)op->sym->session->_private; + if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) { + PMD_DRV_LOG(ERR, "Session alg not supported on this device gen"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; + cipher_param = (void *)&qat_req->serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); - qat_req->comn_mid.dst_length = - qat_req->comn_mid.src_length = - rte_pktmbuf_data_len(op->sym->m_src); + if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + /* AES-GCM */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + do_aead = 1; + } else { + do_auth = 1; + do_cipher = 1; + } + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { + do_auth = 1; + do_cipher = 0; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + do_auth = 0; + do_cipher = 1; + } - qat_req->comn_mid.dest_data_addr = - qat_req->comn_mid.src_data_addr = - rte_pktmbuf_mtophys(op->sym->m_src); + if (do_cipher) { + + if (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + + if (unlikely( + (cipher_param->cipher_length % BYTE_LENGTH != 0) + || (cipher_param->cipher_offset + % BYTE_LENGTH != 0))) { + PMD_DRV_LOG(ERR, + "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + cipher_len = op->sym->cipher.data.length >> 3; + cipher_ofs = 
op->sym->cipher.data.offset >> 3; + + } else if (ctx->bpi_ctx) { + /* DOCSIS - only send complete blocks to device + * Process any partial block using CFB mode. + * Even if 0 complete blocks, still send this to device + * to get into rx queue for post-process and dequeuing + */ + cipher_len = qat_bpicipher_preprocess(ctx, op); + cipher_ofs = op->sym->cipher.data.offset; + } else { + cipher_len = op->sym->cipher.data.length; + cipher_ofs = op->sym->cipher.data.offset; + } - if (unlikely(op->sym->m_dst != NULL)) { - qat_req->comn_mid.dest_data_addr = - rte_pktmbuf_mtophys(op->sym->m_dst); - qat_req->comn_mid.dst_length = - rte_pktmbuf_data_len(op->sym->m_dst); + set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset, + cipher_param, op, qat_req); + min_ofs = cipher_ofs; } - cipher_param = (void *)&qat_req->serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + if (do_auth) { + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 || + ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) { + if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) + || (auth_param->auth_len % BYTE_LENGTH != 0))) { + PMD_DRV_LOG(ERR, + "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + auth_ofs = op->sym->auth.data.offset >> 3; + auth_len = op->sym->auth.data.length >> 3; + + auth_param->u1.aad_adr = + rte_crypto_op_ctophys_offset(op, + ctx->auth_iv.offset); + + } else if (ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + /* AES-GMAC */ + set_cipher_iv(ctx->auth_iv.length, + ctx->auth_iv.offset, + cipher_param, op, qat_req); + auth_ofs = op->sym->auth.data.offset; + auth_len = op->sym->auth.data.length; + + auth_param->u1.aad_adr = 0; + auth_param->u2.aad_sz = 0; + + /* + * If len(iv)==12B fw computes J0 + */ + if (ctx->auth_iv.length == 12) { + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + + } + } else { + auth_ofs = op->sym->auth.data.offset; + auth_len = op->sym->auth.data.length; - cipher_param->cipher_length = op->sym->cipher.data.length; - cipher_param->cipher_offset = op->sym->cipher.data.offset; - if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || - ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { - if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) || - (cipher_param->cipher_offset - % BYTE_LENGTH != 0))) { - PMD_DRV_LOG(ERR, " For Snow3g/Kasumi, QAT PMD only " - "supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; } - cipher_param->cipher_length >>= 3; - cipher_param->cipher_offset >>= 3; - } + min_ofs = auth_ofs; - if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <= - sizeof(cipher_param->u.cipher_IV_array))) { - rte_memcpy(cipher_param->u.cipher_IV_array, - op->sym->cipher.iv.data, - op->sym->cipher.iv.length); - } else { - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_CIPH_IV_64BIT_PTR); - cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr; - } - if (op->sym->auth.digest.phys_addr) { - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER); auth_param->auth_res_addr = op->sym->auth.digest.phys_addr; + } - auth_param->auth_off = op->sym->auth.data.offset; - 
auth_param->auth_len = op->sym->auth.data.length; - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) { - if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) || - (auth_param->auth_len % BYTE_LENGTH != 0))) { - PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only " - "supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; + + if (do_aead) { + if (ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + /* + * If len(iv)==12B fw computes J0 + */ + if (ctx->cipher_iv.length == 12) { + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + } + } - auth_param->auth_off >>= 3; - auth_param->auth_len >>= 3; + + cipher_len = op->sym->aead.data.length; + cipher_ofs = op->sym->aead.data.offset; + auth_len = op->sym->aead.data.length; + auth_ofs = op->sym->aead.data.offset; + + auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr; + auth_param->auth_res_addr = op->sym->aead.digest.phys_addr; + set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset, + cipher_param, op, qat_req); + min_ofs = op->sym->aead.data.offset; } - if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || - ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) { - auth_param->auth_len = (auth_param->auth_len >> 3) - + (auth_param->auth_off >> 3) - + (BYTE_LENGTH >> 3) - - 8; - auth_param->auth_off = 8; - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH - && ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) { - auth_param->auth_len = (auth_param->auth_len >> 3) - + (auth_param->auth_off >> 3) - + (BYTE_LENGTH >> 3); + + if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next)) + do_sgl = 1; + + /* adjust for chain case */ + if (do_cipher && do_auth) + min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; + + if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl)) + min_ofs = 0; + + if (unlikely(op->sym->m_dst != NULL)) { + /* Out-of-place operation (OOP) + * Don't align DMA start. 
DMA the minimum data-set + * so as not to overwrite data in dest buffer + */ + src_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs); + dst_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs); + + } else { + /* In-place operation + * Start DMA at nearest aligned address below min_ofs + */ + src_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs) + & QAT_64_BTYE_ALIGN_MASK; + + if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) - + rte_pktmbuf_headroom(op->sym->m_src)) + > src_buf_start)) { + /* alignment has pushed addr ahead of start of mbuf + * so revert and take the performance hit + */ + src_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_src, + min_ofs); + } + dst_buf_start = src_buf_start; + } + + if (do_cipher || do_aead) { + cipher_param->cipher_offset = + (uint32_t)rte_pktmbuf_mtophys_offset( + op->sym->m_src, cipher_ofs) - src_buf_start; + cipher_param->cipher_length = cipher_len; + } else { + cipher_param->cipher_offset = 0; + cipher_param->cipher_length = 0; + } + + if (do_auth || do_aead) { + auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset( + op->sym->m_src, auth_ofs) - src_buf_start; + auth_param->auth_len = auth_len; + } else { auth_param->auth_off = 0; + auth_param->auth_len = 0; } - auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr; - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - if (op->sym->cipher.iv.length == 12) { - /* - * For GCM a 12 bit IV is allowed, - * but we need to inform the f/w - */ - ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + qat_req->comn_mid.dst_length = + qat_req->comn_mid.src_length = + (cipher_param->cipher_offset + cipher_param->cipher_length) + > (auth_param->auth_off + auth_param->auth_len) ? 
+ (cipher_param->cipher_offset + cipher_param->cipher_length) + : (auth_param->auth_off + auth_param->auth_len); + + if (do_sgl) { + + ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_SGL); + ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start, + &qat_op_cookie->qat_sgl_list_src, + qat_req->comn_mid.src_length); + if (ret) { + PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array"); + return ret; } - if (op->sym->cipher.data.length == 0) { - /* - * GMAC - */ + + if (likely(op->sym->m_dst == NULL)) qat_req->comn_mid.dest_data_addr = qat_req->comn_mid.src_data_addr = - op->sym->auth.aad.phys_addr; - auth_param->u1.aad_adr = 0; - auth_param->auth_len = op->sym->auth.aad.length; - auth_param->u2.aad_sz = 0; - + qat_op_cookie->qat_sgl_src_phys_addr; + else { + ret = qat_sgl_fill_array(op->sym->m_dst, + dst_buf_start, + &qat_op_cookie->qat_sgl_list_dst, + qat_req->comn_mid.dst_length); + + if (ret) { + PMD_DRV_LOG(ERR, "QAT PMD Cannot " + "fill sgl array"); + return ret; + } + + qat_req->comn_mid.src_data_addr = + qat_op_cookie->qat_sgl_src_phys_addr; + qat_req->comn_mid.dest_data_addr = + qat_op_cookie->qat_sgl_dst_phys_addr; } - + } else { + qat_req->comn_mid.src_data_addr = src_buf_start; + qat_req->comn_mid.dest_data_addr = dst_buf_start; } #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX @@ -1031,12 +1380,32 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg) rte_hexdump(stdout, "src_data:", rte_pktmbuf_mtod(op->sym->m_src, uint8_t*), rte_pktmbuf_data_len(op->sym->m_src)); - rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data, - op->sym->cipher.iv.length); - rte_hexdump(stdout, "digest:", op->sym->auth.digest.data, - op->sym->auth.digest.length); - rte_hexdump(stdout, "aad:", op->sym->auth.aad.data, - op->sym->auth.aad.length); + if (do_cipher) { + uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op, + uint8_t *, + ctx->cipher_iv.offset); + rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr, + ctx->cipher_iv.length); + } + + if (do_auth) { + if (ctx->auth_iv.length) { + uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op, + uint8_t *, + ctx->auth_iv.offset); + rte_hexdump(stdout, "auth iv:", auth_iv_ptr, + ctx->auth_iv.length); + } + rte_hexdump(stdout, "digest:", op->sym->auth.digest.data, + ctx->digest_length); + } + + if (do_aead) { + rte_hexdump(stdout, "digest:", op->sym->aead.digest.data, + ctx->digest_length); + rte_hexdump(stdout, "aad:", op->sym->aead.aad.data, + ctx->aad_len); + } #endif return 0; } @@ -1049,21 +1418,11 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) return data - mult; } -void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess) -{ - struct rte_cryptodev_sym_session *sess = sym_sess; - struct qat_session *s = (void *)sess->_private; - - PMD_INIT_FUNC_TRACE(); - s->cd_paddr = rte_mempool_virt2phy(mp, sess) + - offsetof(struct qat_session, cd) + - offsetof(struct rte_cryptodev_sym_session, _private); -} - -int qat_dev_config(__rte_unused struct rte_cryptodev *dev) +int qat_dev_config(__rte_unused struct rte_cryptodev *dev, + __rte_unused struct rte_cryptodev_config *config) { PMD_INIT_FUNC_TRACE(); - return -ENOTSUP; + return 0; } int qat_dev_start(__rte_unused struct rte_cryptodev *dev) @@ -1092,8 +1451,8 @@ int qat_dev_close(struct rte_cryptodev *dev) return 0; } -void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev, - struct rte_cryptodev_info *info) +void qat_dev_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *info) { struct qat_pmd_private *internals = 
dev->data->dev_private; @@ -1103,9 +1462,10 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev, ADF_NUM_SYM_QPS_PER_BUNDLE * ADF_NUM_BUNDLES_PER_DEV; info->feature_flags = dev->feature_flags; - info->capabilities = qat_pmd_capabilities; + info->capabilities = internals->qat_dev_capabilities; info->sym.max_nb_sessions = internals->max_nb_sessions; - info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD; + info->driver_id = cryptodev_qat_driver_id; + info->pci_dev = RTE_DEV_TO_PCI(dev->device); } } @@ -1127,9 +1487,9 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev, } stats->enqueued_count += qp[i]->stats.enqueued_count; - stats->dequeued_count += qp[i]->stats.enqueued_count; + stats->dequeued_count += qp[i]->stats.dequeued_count; stats->enqueue_err_count += qp[i]->stats.enqueue_err_count; - stats->dequeue_err_count += qp[i]->stats.enqueue_err_count; + stats->dequeue_err_count += qp[i]->stats.dequeue_err_count; } }
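
Background sketch (not part of the diff above): the new bpi_cipher_encrypt()/bpi_cipher_decrypt() helpers build a CFB keystream by ECB-encrypting the IV with OpenSSL libcrypto and XORing it with the data, so one EVP_EncryptUpdate() call serves both directions. The standalone program below is a minimal sketch with made-up key/IV values showing that round trip; it assumes only the documented EVP_CIPHER_CTX_new()/EVP_EncryptInit_ex()/EVP_EncryptUpdate() APIs and a hypothetical build line of "cc bpi_sketch.c -lcrypto".

/*
 * Illustrative only, not part of the patch: the ECB+XOR construction used by
 * bpi_cipher_encrypt()/bpi_cipher_decrypt(). A single CFB block is produced by
 * ECB-encrypting the IV and XORing the resulting keystream with the data; the
 * identical XOR decrypts it again. Key and IV values here are made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>

int main(void)
{
	uint8_t key[16] = {0};			/* hypothetical 128-bit key */
	uint8_t iv[16] = {0};			/* hypothetical IV */
	uint8_t plain[5] = {1, 2, 3, 4, 5};	/* trailing partial block, < 16 bytes */
	uint8_t keystream[32];			/* roomy output buffer for EVP_EncryptUpdate */
	uint8_t cipher[5], check[5];
	int len, i;

	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

	if (ctx == NULL)
		return 1;

	/* ECB-encrypt the IV once, as bpi_cipher_ctx_init() + EVP_EncryptUpdate() do */
	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) != 1 ||
	    EVP_EncryptUpdate(ctx, keystream, &len, iv, (int)sizeof(iv)) != 1) {
		EVP_CIPHER_CTX_free(ctx);
		return 1;
	}

	/* XOR the keystream with the partial block: encryption ... */
	for (i = 0; i < (int)sizeof(plain); i++)
		cipher[i] = plain[i] ^ keystream[i];

	/* ... and the same XOR is the decryption */
	for (i = 0; i < (int)sizeof(cipher); i++)
		check[i] = cipher[i] ^ keystream[i];

	printf("round trip %s\n",
	       memcmp(plain, check, sizeof(plain)) == 0 ? "ok" : "failed");

	EVP_CIPHER_CTX_free(ctx);
	return 0;
}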
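
The DOCSIS BPI paths (qat_bpicipher_preprocess()/qat_bpicipher_postprocess()) hand only whole cipher blocks to the device and handle a trailing partial block in software with the same ECB+XOR CFB technique, using the previous ciphertext block as IV unless the data is a runt block shorter than one full block. Below is a simplified sketch of that selection logic over a flat buffer; the struct and function names are illustrative, not taken from the driver, and the rte_mbuf handling is deliberately omitted.

/*
 * Simplified sketch (flat buffer instead of an rte_mbuf chain) of how the
 * trailing partial block and its IV source are chosen before/after the
 * hardware operation. Names below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct partial_block {
	uint32_t offset;	/* offset of the partial block within the data */
	uint32_t len;		/* 0 if the data is block aligned */
	uint32_t iv_is_prev;	/* 1: IV = previous ciphertext block, 0: runt, use session IV */
};

static struct partial_block
find_partial_block(uint32_t data_ofs, uint32_t data_len, uint32_t block_len)
{
	struct partial_block pb = {0, 0, 0};

	if (block_len == 0)
		return pb;

	pb.len = data_len % block_len;
	if (pb.len == 0)
		return pb;	/* block aligned: nothing left for software */

	pb.offset = data_ofs + data_len - pb.len;
	/* at least one full block precedes it: its ciphertext serves as IV */
	pb.iv_is_prev = (pb.len < data_len) ? 1 : 0;
	return pb;
}

int main(void)
{
	/* 4 full AES blocks plus 6 trailing bytes */
	struct partial_block pb = find_partial_block(0, 70, 16);

	printf("partial block: offset %u, len %u, iv from previous block: %u\n",
	       pb.offset, pb.len, pb.iv_is_prev);
	return 0;
}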
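
For chained mbufs the request is switched to a scatter-gather list and qat_sgl_fill_array() walks the segments, trimming the last entry so the total matches the requested data length. The sketch below shows the same idea on a plain linked list rather than rte_mbuf segments; SGL_MAX_ENTRIES and the struct layouts are stand-ins, not the driver's real definitions, and the first-segment offset handling of the real function is left out.

/*
 * Simplified sketch of the qat_sgl_fill_array() idea: gather a chained
 * buffer into an address/length list capped at the requested data length.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define SGL_MAX_ENTRIES 16	/* stand-in for QAT_SGL_MAX_NUMBER */

struct seg {			/* stand-in for one mbuf segment */
	uint64_t addr;
	uint32_t len;
	struct seg *next;
};

struct sgl_entry {
	uint64_t addr;
	uint32_t len;
};

static int
fill_sgl(const struct seg *s, uint32_t data_len,
	 struct sgl_entry *list, uint32_t *num)
{
	uint32_t total = 0, n = 0;

	for (; s != NULL && total < data_len; s = s->next) {
		if (n == SGL_MAX_ENTRIES)
			return -1;	/* too many segments for one request */
		list[n].addr = s->addr;
		list[n].len = s->len;
		total += s->len;
		if (total > data_len)	/* trim the last segment */
			list[n].len -= total - data_len;
		n++;
	}
	if (total < data_len)
		return -1;		/* chain shorter than requested length */
	*num = n;
	return 0;
}

int main(void)
{
	struct seg c = {0x3000, 64, NULL};
	struct seg b = {0x2000, 128, &c};
	struct seg a = {0x1000, 128, &b};
	struct sgl_entry list[SGL_MAX_ENTRIES];
	uint32_t n, i;

	if (fill_sgl(&a, 300, list, &n) == 0)
		for (i = 0; i < n; i++)
			printf("entry %u: addr 0x%" PRIx64 " len %u\n",
			       i, list[i].addr, list[i].len);
	return 0;
}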