X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=inline;f=drivers%2Fcrypto%2Fqat%2Fqat_crypto.c;h=2440846eed2e251e3b268d3699ce97ac7b15f248;hb=12a4aaf1df41669caf25151d2495cc48341e3e9a;hp=386aa453b855dce61aa2fb396072f4c39ef2d965;hpb=54402696f420b23a3521a53ddae28a1364cac084;p=dpdk.git

diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 386aa453b8..2440846eed 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
  *     All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -60,6 +60,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "qat_logs.h"
@@ -297,6 +298,9 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 	/* Get cipher xform from crypto xform chain */
 	cipher_xform = qat_get_cipher_xform(xform);
 
+	session->cipher_iv.offset = cipher_xform->iv.offset;
+	session->cipher_iv.length = cipher_xform->iv.length;
+
 	switch (cipher_xform->algo) {
 	case RTE_CRYPTO_CIPHER_AES_CBC:
 		if (qat_alg_validate_aes_key(cipher_xform->key.length,
@@ -580,6 +584,9 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	}
 	cipher_xform = qat_get_cipher_xform(xform);
 
+	session->auth_iv.offset = auth_xform->iv.offset;
+	session->auth_iv.length = auth_xform->iv.length;
+
 	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
 			(session->qat_hash_alg ==
 				ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
@@ -641,7 +648,8 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
 		iv = last_block - block_len;
 	else
 		/* runt block, i.e. less than one full block */
-		iv = sym_op->cipher.iv.data;
+		iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				ctx->cipher_iv.offset);
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
 	rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
@@ -696,7 +704,8 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
 		iv = dst - block_len;
 	else
 		/* runt block, i.e. less than one full block */
-		iv = sym_op->cipher.iv.data;
+		iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				ctx->cipher_iv.offset);
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
 	rte_hexdump(stdout, "BPI: src before post-process:", last_block,
@@ -757,7 +766,7 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 			tmp_qp->stats.enqueue_err_count++;
 			/*
 			 * This message cannot be enqueued,
-			 * decrease number of ops that wasnt sent
+			 * decrease number of ops that wasn't sent
 			 */
 			rte_atomic16_sub(&tmp_qp->inflights16,
 					nb_ops_possible - nb_ops_sent);
@@ -897,7 +906,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 	uint32_t min_ofs = 0;
 	uint64_t src_buf_start = 0, dst_buf_start = 0;
 	uint8_t do_sgl = 0;
-
+	uint8_t *cipher_iv_ptr = NULL;
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
 	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
@@ -907,7 +916,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 #endif
-	if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
 		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
 				" requests, op (%p) is sessionless.", op);
 		return -EINVAL;
@@ -970,25 +979,21 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 			cipher_ofs = op->sym->cipher.data.offset;
 		}
 
+		cipher_iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
 		/* copy IV into request if it fits */
-		/*
-		 * If IV length is zero do not copy anything but still
-		 * use request descriptor embedded IV
-		 *
-		 */
-		if (op->sym->cipher.iv.length) {
-			if (op->sym->cipher.iv.length <=
-					sizeof(cipher_param->u.cipher_IV_array)) {
-				rte_memcpy(cipher_param->u.cipher_IV_array,
-						op->sym->cipher.iv.data,
-						op->sym->cipher.iv.length);
-			} else {
-				ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-					qat_req->comn_hdr.serv_specif_flags,
-					ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-				cipher_param->u.s.cipher_IV_ptr =
-						op->sym->cipher.iv.phys_addr;
-			}
+		if (ctx->cipher_iv.length <=
+				sizeof(cipher_param->u.cipher_IV_array)) {
+			rte_memcpy(cipher_param->u.cipher_IV_array,
+					cipher_iv_ptr,
+					ctx->cipher_iv.length);
+		} else {
+			ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+			cipher_param->u.s.cipher_IV_ptr =
+					rte_crypto_op_ctophys_offset(op,
+					ctx->cipher_iv.offset);
 		}
 		min_ofs = cipher_ofs;
 	}
@@ -1019,7 +1024,10 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 				auth_len = auth_len + auth_ofs + 1;
 				auth_ofs = 0;
 			}
-		}
+		} else
+			auth_param->u1.aad_adr =
+					rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
 
 	} else if (ctx->qat_hash_alg ==
 			ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
@@ -1027,16 +1035,17 @@
 			ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
 		auth_ofs = op->sym->cipher.data.offset;
 		auth_len = op->sym->cipher.data.length;
+
+		auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
 	} else {
 		auth_ofs = op->sym->auth.data.offset;
 		auth_len = op->sym->auth.data.length;
+
 	}
 	min_ofs = auth_ofs;
 
 	auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
-	auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
-
 	}
 
 	if (op->sym->m_src->next ||
			(op->sym->m_dst && op->sym->m_dst->next))
@@ -1144,9 +1153,9 @@
 
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-		if (op->sym->cipher.iv.length == 12) {
+		if (ctx->cipher_iv.length == 12) {
 			/*
-			 * For GCM a 12 bit IV is allowed,
+			 * For GCM a 12 byte IV is allowed,
 			 * but we need to inform the f/w
 			 */
 			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
@@ -1166,7 +1175,7 @@
 		cipher_param->cipher_length = 0;
 		cipher_param->cipher_offset = 0;
 		auth_param->u1.aad_adr = 0;
-		auth_param->auth_len = op->sym->auth.aad.length;
+		auth_param->auth_len = ctx->aad_len;
 		auth_param->auth_off = op->sym->auth.data.offset;
 		auth_param->u2.aad_sz = 0;
 	}
@@ -1178,12 +1187,23 @@
 	rte_hexdump(stdout, "src_data:",
 			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
 			rte_pktmbuf_data_len(op->sym->m_src));
-	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
-			op->sym->cipher.iv.length);
-	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
-			op->sym->auth.digest.length);
-	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
-			op->sym->auth.aad.length);
+	if (do_cipher)
+		rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
+				ctx->cipher_iv.length);
+
+	if (do_auth) {
+		if (ctx->auth_iv.length) {
+			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+					uint8_t *,
+					ctx->auth_iv.offset);
+			rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
+					ctx->auth_iv.length);
+		}
+		rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+				op->sym->auth.digest.length);
+		rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+				ctx->aad_len);
+	}
 #endif
 	return 0;
 }
@@ -1240,8 +1260,8 @@ int qat_dev_close(struct rte_cryptodev *dev)
 	return 0;
 }
 
-void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *info)
+void qat_dev_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *info)
 {
 	struct qat_pmd_private *internals = dev->data->dev_private;
 
@@ -1254,6 +1274,7 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
 		info->capabilities = internals->qat_dev_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
 		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+		info->pci_dev = RTE_DEV_TO_PCI(dev->device);
 	}
 }