X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_crypto.c;h=37d8a585b85dfb854bc4f1d8d45ebb708f59f7da;hb=98a7ea332ba3da0f74ec951595d36a616165b255;hp=2edae32a3b35be195411917986360252c43b0454;hpb=e25200fbb45df89bbb24f6381e48d6dc97d6c529;p=dpdk.git diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index 2edae32a3b..37d8a585b8 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -54,37 +54,184 @@ #include #include #include -#include #include #include #include #include #include +#include +#include #include "qat_logs.h" #include "qat_algs.h" #include "qat_crypto.h" #include "adf_transport_access_macros.h" +#define BYTE_LENGTH 8 + +static int +qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo, + struct qat_pmd_private *internals) { + int i = 0; + const struct rte_cryptodev_capabilities *capability; + + while ((capability = &(internals->qat_dev_capabilities[i++]))->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + + if (capability->sym.cipher.algo == algo) + return 1; + } + return 0; +} + +static int +qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo, + struct qat_pmd_private *internals) { + int i = 0; + const struct rte_cryptodev_capabilities *capability; + + while ((capability = &(internals->qat_dev_capabilities[i++]))->op != + RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + continue; + + if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + + if (capability->sym.auth.algo == algo) + return 1; + } + return 0; +} + +/** Encrypt a single partial block + * Depends on openssl libcrypto + * Uses ECB+XOR to do CFB encryption, same result, more performant + */ +static inline int +bpi_cipher_encrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[16]; + int i; + + /* ECB method: encrypt the IV, then XOR this with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_encrypt_err; + + for (i = 0; i < srclen; i++) + *(dst+i) = *(src+i)^(encrypted_iv[i]); + + return 0; + +cipher_encrypt_err: + PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed"); + return -EINVAL; +} + +/** Decrypt a single partial block + * Depends on openssl libcrypto + * Uses ECB+XOR to do CFB encryption, same result, more performant + */ +static inline int +bpi_cipher_decrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[16]; + int i; + + /* ECB method: encrypt (not decrypt!) 
the IV, then XOR with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_decrypt_err; + + for (i = 0; i < srclen; i++) + *(dst+i) = *(src+i)^(encrypted_iv[i]); + + return 0; + +cipher_decrypt_err: + PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed"); + return -EINVAL; +} + +/** Creates a context in either AES or DES in ECB mode + * Depends on openssl libcrypto + */ +static void * +bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo, + enum rte_crypto_cipher_operation direction __rte_unused, + uint8_t *key) +{ + const EVP_CIPHER *algo = NULL; + EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new(); + + if (ctx == NULL) + goto ctx_init_err; + + if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) + algo = EVP_des_ecb(); + else + algo = EVP_aes_128_ecb(); + + /* IV will be ECB encrypted whether direction is encrypt or decrypt*/ + if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1) + goto ctx_init_err; + + return ctx; + +ctx_init_err: + if (ctx != NULL) + EVP_CIPHER_CTX_free(ctx); + return NULL; +} + +/** Frees a context previously created + * Depends on openssl libcrypto + */ +static void +bpi_cipher_ctx_free(void *bpi_ctx) +{ + if (bpi_ctx != NULL) + EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx); +} static inline uint32_t adf_modulo(uint32_t data, uint32_t shift); static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg); +qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg, + struct qat_crypto_op_cookie *qat_op_cookie); void qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session) { struct qat_session *sess = session; - phys_addr_t cd_paddr = sess->cd_paddr; + phys_addr_t cd_paddr; PMD_INIT_FUNC_TRACE(); - if (session) { + if (sess) { + if (sess->bpi_ctx) { + bpi_cipher_ctx_free(sess->bpi_ctx); + sess->bpi_ctx = NULL; + } + cd_paddr = sess->cd_paddr; memset(sess, 0, qat_crypto_sym_get_session_private_size(dev)); - sess->cd_paddr = cd_paddr; - } + } else + PMD_DRV_LOG(ERR, "NULL session"); } static int @@ -143,10 +290,8 @@ void * qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *session_private) { - struct qat_pmd_private *internals = dev->data->dev_private; - struct qat_session *session = session_private; - + struct qat_pmd_private *internals = dev->data->dev_private; struct rte_crypto_cipher_xform *cipher_xform = NULL; /* Get cipher xform from crypto xform chain */ @@ -169,14 +314,111 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, } session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; break; + case RTE_CRYPTO_CIPHER_AES_CTR: + if (qat_alg_validate_aes_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid AES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + break; + case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: + if (qat_alg_validate_snow3g_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; case RTE_CRYPTO_CIPHER_NULL: - case RTE_CRYPTO_CIPHER_3DES_ECB: + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_KASUMI_F8: + if (qat_alg_validate_kasumi_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size"); + goto error_out; + } + session->qat_mode = 
ICP_QAT_HW_CIPHER_F8_MODE; + break; case RTE_CRYPTO_CIPHER_3DES_CBC: + if (qat_alg_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_CBC: + if (qat_alg_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_CTR: + if (qat_alg_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: + session->bpi_ctx = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data); + if (session->bpi_ctx == NULL) { + PMD_DRV_LOG(ERR, "failed to create DES BPI ctx"); + goto error_out; + } + if (qat_alg_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: + session->bpi_ctx = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data); + if (session->bpi_ctx == NULL) { + PMD_DRV_LOG(ERR, "failed to create AES BPI ctx"); + goto error_out; + } + if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_ZUC_EEA3: + if (!qat_is_cipher_alg_supported( + cipher_xform->algo, internals)) { + PMD_DRV_LOG(ERR, "%s not supported on this device", + rte_crypto_cipher_algorithm_strings + [cipher_xform->algo]); + goto error_out; + } + if (qat_alg_validate_zuc_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: - case RTE_CRYPTO_CIPHER_AES_CTR: case RTE_CRYPTO_CIPHER_AES_CCM: - case RTE_CRYPTO_CIPHER_KASUMI_F8: - PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u", + case RTE_CRYPTO_CIPHER_AES_F8: + case RTE_CRYPTO_CIPHER_AES_XTS: + case RTE_CRYPTO_CIPHER_ARC4: + PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u", cipher_xform->algo); goto error_out; default: @@ -198,7 +440,10 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, return session; error_out: - rte_mempool_put(internals->sess_mp, session); + if (session->bpi_ctx) { + bpi_cipher_ctx_free(session->bpi_ctx); + session->bpi_ctx = NULL; + } return NULL; } @@ -207,12 +452,9 @@ void * qat_crypto_sym_configure_session(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *session_private) { - struct qat_pmd_private *internals = dev->data->dev_private; - struct qat_session *session = session_private; int qat_cmd_id; - PMD_INIT_FUNC_TRACE(); /* Get requested QAT command id */ @@ -254,10 +496,10 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev, session->qat_cmd); goto error_out; } + return session; error_out: - rte_mempool_put(internals->sess_mp, session); return NULL; } @@ -267,19 +509,25 @@ 
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, struct qat_session *session_private) { - struct qat_pmd_private *internals = dev->data->dev_private; struct qat_session *session = session_private; struct rte_crypto_auth_xform *auth_xform = NULL; struct rte_crypto_cipher_xform *cipher_xform = NULL; + struct qat_pmd_private *internals = dev->data->dev_private; auth_xform = qat_get_auth_xform(xform); switch (auth_xform->algo) { case RTE_CRYPTO_AUTH_SHA1_HMAC: session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1; break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224; + break; case RTE_CRYPTO_AUTH_SHA256_HMAC: session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256; break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384; + break; case RTE_CRYPTO_AUTH_SHA512_HMAC: session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512; break; @@ -287,25 +535,41 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC; break; case RTE_CRYPTO_AUTH_AES_GCM: + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; + break; case RTE_CRYPTO_AUTH_AES_GMAC: session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; break; + case RTE_CRYPTO_AUTH_SNOW3G_UIA2: + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2; + break; + case RTE_CRYPTO_AUTH_MD5_HMAC: + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5; + break; case RTE_CRYPTO_AUTH_NULL: + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL; + break; + case RTE_CRYPTO_AUTH_KASUMI_F9: + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9; + break; + case RTE_CRYPTO_AUTH_ZUC_EIA3: + if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) { + PMD_DRV_LOG(ERR, "%s not supported on this device", + rte_crypto_auth_algorithm_strings + [auth_xform->algo]); + goto error_out; + } + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3; + break; case RTE_CRYPTO_AUTH_SHA1: case RTE_CRYPTO_AUTH_SHA256: case RTE_CRYPTO_AUTH_SHA512: case RTE_CRYPTO_AUTH_SHA224: - case RTE_CRYPTO_AUTH_SHA224_HMAC: case RTE_CRYPTO_AUTH_SHA384: - case RTE_CRYPTO_AUTH_SHA384_HMAC: case RTE_CRYPTO_AUTH_MD5: - case RTE_CRYPTO_AUTH_MD5_HMAC: case RTE_CRYPTO_AUTH_AES_CCM: - case RTE_CRYPTO_AUTH_KASUMI_F9: - case RTE_CRYPTO_AUTH_SNOW3G_UIA2: case RTE_CRYPTO_AUTH_AES_CMAC: case RTE_CRYPTO_AUTH_AES_CBC_MAC: - case RTE_CRYPTO_AUTH_ZUC_EIA3: PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u", auth_xform->algo); goto error_out; @@ -323,20 +587,21 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, cipher_xform->key.data, cipher_xform->key.length, auth_xform->add_auth_data_length, - auth_xform->digest_length)) + auth_xform->digest_length, + auth_xform->op)) goto error_out; } else { if (qat_alg_aead_session_create_content_desc_auth(session, auth_xform->key.data, auth_xform->key.length, auth_xform->add_auth_data_length, - auth_xform->digest_length)) + auth_xform->digest_length, + auth_xform->op)) goto error_out; } return session; error_out: - rte_mempool_put(internals->sess_mp, session); return NULL; } @@ -346,6 +611,112 @@ unsigned qat_crypto_sym_get_session_private_size( return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8); } +static inline uint32_t +qat_bpicipher_preprocess(struct qat_session *ctx, + struct rte_crypto_op *op) +{ + uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = sym_op->cipher.data.length % block_len; + + if 
(last_block_len && + ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) { + + /* Decrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely(sym_op->m_dst != NULL)) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = last_block - block_len; + else + /* runt block, i.e. less than one full block */ + iv = sym_op->cipher.iv.data; + +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX + rte_hexdump(stdout, "BPI: src before pre-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst before pre-process:", dst, + last_block_len); +#endif + bpi_cipher_decrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX + rte_hexdump(stdout, "BPI: src after pre-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst after pre-process:", dst, + last_block_len); +#endif + } + + return sym_op->cipher.data.length - last_block_len; +} + +static inline uint32_t +qat_bpicipher_postprocess(struct qat_session *ctx, + struct rte_crypto_op *op) +{ + uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = sym_op->cipher.data.length % block_len; + + if (last_block_len > 0 && + ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) { + + /* Encrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset; + + last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely(sym_op->m_dst != NULL)) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = dst - block_len; + else + /* runt block, i.e. 
less than one full block */ + iv = sym_op->cipher.iv.data; + +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX + rte_hexdump(stdout, "BPI: src before post-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst before post-process:", + dst, last_block_len); +#endif + bpi_cipher_encrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX + rte_hexdump(stdout, "BPI: src after post-process:", last_block, + last_block_len); + if (sym_op->m_dst != NULL) + rte_hexdump(stdout, "BPI: dst after post-process:", dst, + last_block_len); +#endif + } + return sym_op->cipher.data.length - last_block_len; +} uint16_t qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, @@ -361,6 +732,9 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, register uint32_t tail; int overflow; + if (unlikely(nb_ops == 0)) + return 0; + /* read params used a lot in main loop into registers */ queue = &(tmp_qp->tx_q); base_addr = (uint8_t *)queue->base_addr; @@ -377,9 +751,16 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, } while (nb_ops_sent != nb_ops_possible) { - ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail); + ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail, + tmp_qp->op_cookies[tail / queue->msg_size]); if (ret != 0) { tmp_qp->stats.enqueue_err_count++; + /* + * This message cannot be enqueued, + * decrease number of ops that wasn't sent + */ + rte_atomic16_sub(&tmp_qp->inflights16, + nb_ops_possible - nb_ops_sent); if (nb_ops_sent == 0) return 0; goto kick_tail; @@ -418,15 +799,21 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg, - sizeof(struct icp_qat_fw_comn_resp)); + sizeof(struct icp_qat_fw_comn_resp)); + #endif if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( resp_msg->comn_hdr.comn_status)) { rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; } else { + struct qat_session *sess = (struct qat_session *) + (rx_op->sym->session->_private); + if (sess->bpi_ctx) + qat_bpicipher_postprocess(sess, rx_op); rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; } + *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG; queue->head = adf_modulo(queue->head + queue->msg_size, @@ -449,12 +836,68 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, } static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg) +qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start, + struct qat_alg_buf_list *list, uint32_t data_len) +{ + int nr = 1; + + uint32_t buf_len = rte_pktmbuf_mtophys(buf) - + buff_start + rte_pktmbuf_data_len(buf); + + list->bufers[0].addr = buff_start; + list->bufers[0].resrvd = 0; + list->bufers[0].len = buf_len; + + if (data_len <= buf_len) { + list->num_bufs = nr; + list->bufers[0].len = data_len; + return 0; + } + + buf = buf->next; + while (buf) { + if (unlikely(nr == QAT_SGL_MAX_NUMBER)) { + PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL" + " entry(%u)", + QAT_SGL_MAX_NUMBER); + return -EINVAL; + } + + list->bufers[nr].len = rte_pktmbuf_data_len(buf); + list->bufers[nr].resrvd = 0; + list->bufers[nr].addr = rte_pktmbuf_mtophys(buf); + + buf_len += list->bufers[nr].len; + buf = buf->next; + + if (buf_len > data_len) { + list->bufers[nr].len -= + buf_len - data_len; + buf = NULL; + } + ++nr; + } + list->num_bufs = nr; + + return 0; +} + +static inline int +qat_write_hw_desc_entry(struct rte_crypto_op *op, 
uint8_t *out_msg, + struct qat_crypto_op_cookie *qat_op_cookie) { + int ret = 0; struct qat_session *ctx; struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; register struct icp_qat_fw_la_bulk_req *qat_req; + uint8_t do_auth = 0, do_cipher = 0; + uint32_t cipher_len = 0, cipher_ofs = 0; + uint32_t auth_len = 0, auth_ofs = 0; + uint32_t min_ofs = 0; + uint64_t src_buf_start = 0, dst_buf_start = 0; + uint8_t do_sgl = 0; + #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) { @@ -464,69 +907,270 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg) return -EINVAL; } #endif - if (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) { + if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) { PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented" " requests, op (%p) is sessionless.", op); return -EINVAL; } - if (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) { + if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) { PMD_DRV_LOG(ERR, "Session was not created for this device"); return -EINVAL; } ctx = (struct qat_session *)op->sym->session->_private; qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; - *qat_req = ctx->fw_req; + rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; - - /* - * The following code assumes: - * - single entry buffer. - * - always in place. - */ - qat_req->comn_mid.dst_length = - qat_req->comn_mid.src_length = - rte_pktmbuf_data_len(op->sym->m_src); - qat_req->comn_mid.dest_data_addr = - qat_req->comn_mid.src_data_addr = - rte_pktmbuf_mtophys(op->sym->m_src); cipher_param = (void *)&qat_req->serv_specif_rqpars; auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); - cipher_param->cipher_length = op->sym->cipher.data.length; - cipher_param->cipher_offset = op->sym->cipher.data.offset; - if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <= - sizeof(cipher_param->u.cipher_IV_array))) { - rte_memcpy(cipher_param->u.cipher_IV_array, - op->sym->cipher.iv.data, - op->sym->cipher.iv.length); - } else { - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_CIPH_IV_64BIT_PTR); - cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr; + if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + do_auth = 1; + do_cipher = 1; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { + do_auth = 1; + do_cipher = 0; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + do_auth = 0; + do_cipher = 1; } - if (op->sym->auth.digest.phys_addr) { - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER); + + if (do_cipher) { + + if (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + + if (unlikely( + (cipher_param->cipher_length % BYTE_LENGTH != 0) + || (cipher_param->cipher_offset + % BYTE_LENGTH != 0))) { + PMD_DRV_LOG(ERR, + "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + cipher_len = op->sym->cipher.data.length >> 3; + cipher_ofs = op->sym->cipher.data.offset >> 3; + + } else if (ctx->bpi_ctx) { + /* DOCSIS - only send complete blocks to device + * Process 
any partial block using CFB mode. + * Even if 0 complete blocks, still send this to device + * to get into rx queue for post-process and dequeuing + */ + cipher_len = qat_bpicipher_preprocess(ctx, op); + cipher_ofs = op->sym->cipher.data.offset; + } else { + cipher_len = op->sym->cipher.data.length; + cipher_ofs = op->sym->cipher.data.offset; + } + + /* copy IV into request if it fits */ + /* + * If IV length is zero do not copy anything but still + * use request descriptor embedded IV + * + */ + if (op->sym->cipher.iv.length) { + if (op->sym->cipher.iv.length <= + sizeof(cipher_param->u.cipher_IV_array)) { + rte_memcpy(cipher_param->u.cipher_IV_array, + op->sym->cipher.iv.data, + op->sym->cipher.iv.length); + } else { + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_CIPH_IV_64BIT_PTR); + cipher_param->u.s.cipher_IV_ptr = + op->sym->cipher.iv.phys_addr; + } + } + min_ofs = cipher_ofs; + } + + if (do_auth) { + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 || + ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) { + if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) + || (auth_param->auth_len % BYTE_LENGTH != 0))) { + PMD_DRV_LOG(ERR, + "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + auth_ofs = op->sym->auth.data.offset >> 3; + auth_len = op->sym->auth.data.length >> 3; + + if (ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) { + if (do_cipher) { + auth_len = auth_len + auth_ofs + 1 - + ICP_QAT_HW_KASUMI_BLK_SZ; + auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ; + } else { + auth_len = auth_len + auth_ofs + 1; + auth_ofs = 0; + } + } + + } else if (ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + auth_ofs = op->sym->cipher.data.offset; + auth_len = op->sym->cipher.data.length; + } else { + auth_ofs = op->sym->auth.data.offset; + auth_len = op->sym->auth.data.length; + } + min_ofs = auth_ofs; + auth_param->auth_res_addr = op->sym->auth.digest.phys_addr; + + auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr; + } - auth_param->auth_off = op->sym->auth.data.offset; - auth_param->auth_len = op->sym->auth.data.length; - auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr; - /* (GCM) aad length(240 max) will be at this location after precompute */ - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - auth_param->u2.aad_sz = - ALIGN_POW2_ROUNDUP(ctx->cd.hash.sha.state1[ - ICP_QAT_HW_GALOIS_128_STATE1_SZ + - ICP_QAT_HW_GALOIS_H_SZ + 3], 16); + if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next)) + do_sgl = 1; + + /* adjust for chain case */ + if (do_cipher && do_auth) + min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; + + if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl)) + min_ofs = 0; + + if (unlikely(op->sym->m_dst != NULL)) { + /* Out-of-place operation (OOP) + * Don't align DMA start. 
DMA the minimum data-set + * so as not to overwrite data in dest buffer + */ + src_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs); + dst_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs); + + } else { + /* In-place operation + * Start DMA at nearest aligned address below min_ofs + */ + src_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs) + & QAT_64_BTYE_ALIGN_MASK; + + if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) - + rte_pktmbuf_headroom(op->sym->m_src)) + > src_buf_start)) { + /* alignment has pushed addr ahead of start of mbuf + * so revert and take the performance hit + */ + src_buf_start = + rte_pktmbuf_mtophys_offset(op->sym->m_src, + min_ofs); + } + dst_buf_start = src_buf_start; } - auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3; + if (do_cipher) { + cipher_param->cipher_offset = + (uint32_t)rte_pktmbuf_mtophys_offset( + op->sym->m_src, cipher_ofs) - src_buf_start; + cipher_param->cipher_length = cipher_len; + } else { + cipher_param->cipher_offset = 0; + cipher_param->cipher_length = 0; + } + if (do_auth) { + auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset( + op->sym->m_src, auth_ofs) - src_buf_start; + auth_param->auth_len = auth_len; + } else { + auth_param->auth_off = 0; + auth_param->auth_len = 0; + } + qat_req->comn_mid.dst_length = + qat_req->comn_mid.src_length = + (cipher_param->cipher_offset + cipher_param->cipher_length) + > (auth_param->auth_off + auth_param->auth_len) ? + (cipher_param->cipher_offset + cipher_param->cipher_length) + : (auth_param->auth_off + auth_param->auth_len); + + if (do_sgl) { + + ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_SGL); + ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start, + &qat_op_cookie->qat_sgl_list_src, + qat_req->comn_mid.src_length); + if (ret) { + PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array"); + return ret; + } + + if (likely(op->sym->m_dst == NULL)) + qat_req->comn_mid.dest_data_addr = + qat_req->comn_mid.src_data_addr = + qat_op_cookie->qat_sgl_src_phys_addr; + else { + ret = qat_sgl_fill_array(op->sym->m_dst, + dst_buf_start, + &qat_op_cookie->qat_sgl_list_dst, + qat_req->comn_mid.dst_length); + + if (ret) { + PMD_DRV_LOG(ERR, "QAT PMD Cannot " + "fill sgl array"); + return ret; + } + + qat_req->comn_mid.src_data_addr = + qat_op_cookie->qat_sgl_src_phys_addr; + qat_req->comn_mid.dest_data_addr = + qat_op_cookie->qat_sgl_dst_phys_addr; + } + } else { + qat_req->comn_mid.src_data_addr = src_buf_start; + qat_req->comn_mid.dest_data_addr = dst_buf_start; + } + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + if (op->sym->cipher.iv.length == 12) { + /* + * For GCM a 12 bit IV is allowed, + * but we need to inform the f/w + */ + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + } + if (op->sym->cipher.data.length == 0) { + /* + * GMAC + */ + qat_req->comn_mid.dest_data_addr = + qat_req->comn_mid.src_data_addr = + op->sym->auth.aad.phys_addr; + qat_req->comn_mid.dst_length = + qat_req->comn_mid.src_length = + rte_pktmbuf_data_len(op->sym->m_src); + cipher_param->cipher_length = 0; + cipher_param->cipher_offset = 0; + auth_param->u1.aad_adr = 0; + auth_param->auth_len = op->sym->auth.aad.length; + auth_param->auth_off = op->sym->auth.data.offset; + auth_param->u2.aad_sz = 0; + } + } #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX rte_hexdump(stdout, "qat_req:", qat_req, @@ -552,18 +1196,22 
@@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) return data - mult; } -void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess) +void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess) { - struct qat_session *s = priv_sess; + struct rte_cryptodev_sym_session *sess = sym_sess; + struct qat_session *s = (void *)sess->_private; PMD_INIT_FUNC_TRACE(); - s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd); + s->cd_paddr = rte_mempool_virt2phy(mp, sess) + + offsetof(struct qat_session, cd) + + offsetof(struct rte_cryptodev_sym_session, _private); } -int qat_dev_config(__rte_unused struct rte_cryptodev *dev) +int qat_dev_config(__rte_unused struct rte_cryptodev *dev, + __rte_unused struct rte_cryptodev_config *config) { PMD_INIT_FUNC_TRACE(); - return -ENOTSUP; + return 0; } int qat_dev_start(__rte_unused struct rte_cryptodev *dev) @@ -602,7 +1250,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev, info->max_nb_queue_pairs = ADF_NUM_SYM_QPS_PER_BUNDLE * ADF_NUM_BUNDLES_PER_DEV; - + info->feature_flags = dev->feature_flags; + info->capabilities = internals->qat_dev_capabilities; info->sym.max_nb_sessions = internals->max_nb_sessions; info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD; } @@ -626,9 +1275,9 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev, } stats->enqueued_count += qp[i]->stats.enqueued_count; - stats->dequeued_count += qp[i]->stats.enqueued_count; + stats->dequeued_count += qp[i]->stats.dequeued_count; stats->enqueue_err_count += qp[i]->stats.enqueue_err_count; - stats->dequeue_err_count += qp[i]->stats.enqueue_err_count; + stats->dequeue_err_count += qp[i]->stats.dequeue_err_count; } }
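
Note on the DOCSIS BPI helpers added above (bpi_cipher_encrypt/bpi_cipher_decrypt plus qat_bpicipher_preprocess/postprocess): the trailing partial block is handled in software as a single block of CFB built from ECB, i.e. the IV (or the previous ciphertext block) is ECB-encrypted with the session key and XORed onto the runt block, so the same keystream routine serves both directions. A self-contained sketch of that idea against OpenSSL libcrypto, shown here for the AES-128 (AES_DOCSISBPI) case; the file name, key, IV and data below are illustrative placeholders, not values from the patch:

/* bpi_cfb_sketch.c - illustrative only, not part of the patch.
 * Build (assumption): cc bpi_cfb_sketch.c -lcrypto
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

/* Encrypt or decrypt a trailing partial block (len < 16):
 * keystream = AES-ECB(key, iv), dst = src XOR keystream.
 * Only the forward (encrypt) ECB transform is ever needed, which is why
 * the driver above always calls EVP_EncryptUpdate() in both helpers.
 */
static int
bpi_partial_block(const uint8_t *key, const uint8_t *iv,
		const uint8_t *src, uint8_t *dst, int len)
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	uint8_t keystream[32];
	int outlen, i, ret = -1;

	if (ctx == NULL)
		return -1;
	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) != 1)
		goto out;
	if (EVP_EncryptUpdate(ctx, keystream, &outlen, iv, 16) <= 0)
		goto out;
	for (i = 0; i < len; i++)
		dst[i] = src[i] ^ keystream[i];
	ret = 0;
out:
	EVP_CIPHER_CTX_free(ctx);
	return ret;
}

int
main(void)
{
	const uint8_t key[16] = { 0 };	/* placeholder key and IV */
	const uint8_t iv[16] = { 0 };
	const uint8_t clear[5] = { 1, 2, 3, 4, 5 };	/* 5-byte runt block */
	uint8_t ct[5], pt[5];

	bpi_partial_block(key, iv, clear, ct, 5);	/* encrypt direction */
	bpi_partial_block(key, iv, ct, pt, 5);		/* decrypt direction */
	printf("round trip %s\n", memcmp(clear, pt, 5) == 0 ? "ok" : "failed");
	return 0;
}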
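
The qat_sgl_fill_array() hunk above builds a scatter-gather list from a chained mbuf: the first entry starts at the (possibly offset) DMA start address inside the first segment, subsequent segments are appended, the walk bails out at QAT_SGL_MAX_NUMBER, and the final entry is trimmed so the list covers exactly data_len bytes. A minimal sketch of that walk, using a hypothetical segment struct and plain virtual addresses in place of rte_mbuf and rte_pktmbuf_mtophys():

/* sgl_fill_sketch.c - illustrative only, not the driver's real types. */
#include <stdint.h>
#include <stdio.h>

#define MAX_SGL_ENTRIES 16		/* stands in for QAT_SGL_MAX_NUMBER */

struct seg {				/* stands in for a chained rte_mbuf */
	uint8_t *data;
	uint32_t len;
	struct seg *next;
};

struct sgl_entry { uint8_t *addr; uint32_t len; };

/* Fill 'list' so it covers exactly data_len bytes starting at 'start'
 * (which must lie inside the first segment). Returns the entry count,
 * or -1 if the chain needs more than MAX_SGL_ENTRIES entries. */
static int
sgl_fill(struct seg *buf, uint8_t *start, struct sgl_entry *list,
		uint32_t data_len)
{
	int nr = 1;
	uint32_t covered = (uint32_t)(buf->data + buf->len - start);

	list[0].addr = start;
	list[0].len = covered;
	if (data_len <= covered) {
		list[0].len = data_len;
		return nr;
	}

	for (buf = buf->next; buf != NULL; buf = buf->next) {
		if (nr == MAX_SGL_ENTRIES)
			return -1;
		list[nr].addr = buf->data;
		list[nr].len = buf->len;
		covered += buf->len;
		if (covered > data_len) {	/* trim the final entry */
			list[nr].len -= covered - data_len;
			nr++;
			break;
		}
		nr++;
	}
	return nr;
}

int
main(void)
{
	uint8_t a[64], b[64], c[64];
	struct seg s2 = { c, 64, NULL };
	struct seg s1 = { b, 64, &s2 };
	struct seg s0 = { a, 64, &s1 };
	struct sgl_entry list[MAX_SGL_ENTRIES];

	/* Start 16 bytes into the first segment, cover 100 bytes total:
	 * expect two entries of 48 and 52 bytes. */
	int n = sgl_fill(&s0, a + 16, list, 100);

	printf("%d entries: %u + %u\n", n, list[0].len, list[1].len);
	return 0;
}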
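
Finally, qat_crypto_sym_session_init() above switches cd_paddr from rte_mempool_virt2phy(mp, &s->cd) to a sum of offsetof() terms, because the QAT private session now sits inside the generic rte_cryptodev_sym_session's _private area, so the content-descriptor address must be derived from the start of the whole session object. A small sketch of that address arithmetic, with simplified stand-in structs and the object's virtual address in place of rte_mempool_virt2phy():

/* session_offset_sketch.c - illustrative only; the struct names and
 * fields are simplified stand-ins, not the real DPDK/QAT definitions. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct generic_sym_session {	/* stands in for rte_cryptodev_sym_session */
	void *dev_priv;
	char _private[128];	/* PMD-private session area */
};

struct pmd_session {		/* stands in for qat_session */
	uint32_t cmd;
	uint8_t cd[64];		/* stands in for the content descriptor */
};

int
main(void)
{
	struct generic_sym_session sess_obj;
	struct generic_sym_session *sess = &sess_obj;
	struct pmd_session *s = (struct pmd_session *)sess->_private;

	/* Same arithmetic as the new qat_crypto_sym_session_init(), with
	 * the virtual address standing in for rte_mempool_virt2phy(): */
	uintptr_t cd_addr = (uintptr_t)sess +
		offsetof(struct pmd_session, cd) +
		offsetof(struct generic_sym_session, _private);

	printf("offset math %s\n",
		cd_addr == (uintptr_t)&s->cd ? "matches &s->cd" : "mismatch");
	return 0;
}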