struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;
-struct qat_crypto_op_cookie {
+struct qat_sym_op_cookie {
struct qat_alg_buf_list qat_sgl_list_src;
struct qat_alg_buf_list qat_sgl_list_dst;
phys_addr_t qat_sgl_src_phys_addr;
phys_addr_t qat_sgl_dst_phys_addr;
};
-#endif /* _QAT_QAT_COMMON_H_ */
+#endif /* _QAT_COMMON_H_ */
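For context, this per-operation cookie exists so that every in-flight request has firmware-visible source and destination scatter-gather lists plus their pre-computed bus addresses. A minimal sketch of how such a list could be filled from an mbuf chain follows; the helper name is hypothetical and the qat_alg_buf member names (len, resrvd, addr) and num_bufs are assumptions based on the structures in this tree.

#include <errno.h>
#include <rte_mbuf.h>
#include "qat_common.h"	/* struct qat_alg_buf_list, QAT_SGL_MAX_NUMBER (assumed) */

/* Hypothetical sketch, not part of the patch: walk an mbuf chain and fill
 * one SGL entry per segment, recording each segment's bus address. */
static int
fill_sgl_from_mbuf(struct qat_alg_buf_list *list, struct rte_mbuf *m)
{
	uint32_t nr = 0;

	for (; m != NULL; m = m->next) {
		if (nr >= QAT_SGL_MAX_NUMBER)
			return -EINVAL;	/* chain longer than the SGL can hold */
		list->bufers[nr].len = rte_pktmbuf_data_len(m);
		list->bufers[nr].resrvd = 0;
		list->bufers[nr].addr = rte_pktmbuf_iova(m);	/* device-visible address */
		nr++;
	}
	list->num_bufs = nr;
	return 0;
}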
if (qp->op_cookie_pool == NULL)
qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
qp->nb_descriptors,
- sizeof(struct qat_crypto_op_cookie), 64, 0,
+ sizeof(struct qat_sym_op_cookie), 64, 0,
NULL, NULL, NULL, NULL, socket_id,
0);
if (!qp->op_cookie_pool) {
goto create_err;
}
- struct qat_crypto_op_cookie *sql_cookie =
+ struct qat_sym_op_cookie *sql_cookie =
qp->op_cookies[i];
sql_cookie->qat_sgl_src_phys_addr =
rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
+ offsetof(struct qat_sym_op_cookie,
qat_sgl_list_src);
sql_cookie->qat_sgl_dst_phys_addr =
rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
+ offsetof(struct qat_sym_op_cookie,
qat_sgl_list_dst);
}
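The loop above pre-computes the bus addresses of the two SGLs once at queue-pair setup instead of on the data path. The pattern generalises to any DMA-visible member embedded in a mempool element; a small illustrative sketch (struct and function names are invented for illustration):

#include <stddef.h>
#include <rte_mempool.h>

/* Illustrative sketch: the IOVA of a member embedded in a mempool element
 * is the element's IOVA plus the member's byte offset within the element. */
struct example_cookie {
	uint64_t sw_state;	/* host-only bookkeeping */
	uint64_t dma_desc[8];	/* stands in for the device-visible sub-structure */
};

static rte_iova_t
example_cookie_desc_iova(const struct example_cookie *c)
{
	return rte_mempool_virt2iova(c) +
			offsetof(struct example_cookie, dma_desc);
}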
static inline int
qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
+ struct qat_sym_op_cookie *qat_op_cookie, struct qat_qp *qp);
static inline uint32_t
-qat_bpicipher_preprocess(struct qat_session *ctx,
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
struct rte_crypto_op *op)
{
int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
}
static inline uint32_t
-qat_bpicipher_postprocess(struct qat_session *ctx,
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
struct rte_crypto_op *op)
{
int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
resp_msg->comn_hdr.comn_status)) {
rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- struct qat_session *sess = (struct qat_session *)
+ struct qat_sym_session *sess =
+ (struct qat_sym_session *)
get_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
if (sess->bpi_ctx)
qat_bpicipher_postprocess(sess, rx_op);
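On the response (dequeue) path the session is recovered from the completed operation through the per-driver private-data slot, as the hunk above shows. A condensed sketch of that lookup, with a hypothetical wrapper name:

#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>	/* get_session_private_data() */

/* Hypothetical helper: fetch this driver's private session data for an op. */
static struct qat_sym_session *
qat_get_session(struct rte_crypto_op *op, uint8_t driver_id)
{
	if (op->sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return NULL;	/* sessionless operations carry no private data */
	return (struct qat_sym_session *)get_session_private_data(
			op->sym->session, driver_id);
}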
static inline int
qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
+ struct qat_sym_op_cookie *qat_op_cookie, struct qat_qp *qp)
{
int ret = 0;
- struct qat_session *ctx;
+ struct qat_sym_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
return -EINVAL;
}
- ctx = (struct qat_session *)get_session_private_data(
+ ctx = (struct qat_sym_session *)get_session_private_data(
op->sym->session, cryptodev_qat_driver_id);
if (unlikely(ctx == NULL)) {
#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
/* number of inflights below which no tail write coalescing should occur */
-struct qat_session;
+struct qat_sym_session;
/**
* Structure associated with each queue.
PMD_INIT_FUNC_TRACE();
uint8_t index = dev->driver_id;
void *sess_priv = get_session_private_data(sess, index);
- struct qat_session *s = (struct qat_session *)sess_priv;
+ struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
if (sess_priv) {
if (s->bpi_ctx)
int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
+ struct qat_sym_session *session)
{
struct qat_pmd_private *internals = dev->data->dev_private;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
- struct qat_session *session = session_private;
+ struct qat_sym_session *session = session_private;
int ret;
int qat_cmd_id;
/* Set context descriptor physical address */
session->cd_paddr = rte_mempool_virt2iova(session) +
- offsetof(struct qat_session, cd);
+ offsetof(struct qat_sym_session, cd);
session->min_qat_dev_gen = QAT_GEN1;
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
+ struct qat_sym_session *session)
{
struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
struct qat_pmd_private *internals = dev->data->dev_private;
int
qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
+ struct qat_sym_session *session)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
enum rte_crypto_auth_operation crypto_operation;
unsigned int qat_sym_session_get_private_size(
struct rte_cryptodev *dev __rte_unused)
{
- return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
+ return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
}
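This per-driver size is what the cryptodev framework queries when sizing session private-data storage; rounding up to a multiple of 8 keeps consecutively packed private areas aligned. On the application side the value is typically consumed along these lines (a hypothetical sketch, not QAT-specific):

#include <rte_cryptodev.h>
#include <rte_mempool.h>

/* Hypothetical sketch: size the session private-data mempool from the
 * value reported by rte_cryptodev_get_private_session_size(). */
static struct rte_mempool *
make_session_priv_pool(uint8_t dev_id, uint32_t nb_sessions, int socket_id)
{
	unsigned int elt_size = rte_cryptodev_get_private_session_size(dev_id);

	return rte_mempool_create("sess_priv_pool", nb_sessions, elt_size,
			0, 0, NULL, NULL, NULL, NULL, socket_id, 0);
}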
/* returns block size in bytes per cipher algo */
static void
qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags)
+ enum qat_sym_proto_flag proto_flags)
{
PMD_INIT_FUNC_TRACE();
header->hdr_flags =
* and set its protocol flag in both cipher and auth part of content
* descriptor building function
*/
-static enum qat_crypto_proto_flag
+static enum qat_sym_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
switch (proto) {
return qat_proto_flag;
}
-int qat_sym_session_aead_create_cd_cipher(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
uint8_t *cipherkey,
uint32_t cipherkeylen)
{
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
uint32_t total_key_size;
uint16_t cipher_offset, cd_size;
return 0;
}
-int qat_sym_session_aead_create_cd_auth(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
uint32_t aad_length,
uint32_t *aad_len = NULL;
uint32_t wordIndex = 0;
uint32_t *pTempKey;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
PMD_INIT_FUNC_TRACE();
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
-enum qat_crypto_proto_flag {
+enum qat_sym_proto_flag {
QAT_CRYPTO_PROTO_FLAG_NONE = 0,
QAT_CRYPTO_PROTO_FLAG_CCM = 1,
QAT_CRYPTO_PROTO_FLAG_GCM = 2,
};
/* Common content descriptor */
-struct qat_alg_cd {
+struct qat_sym_cd {
struct icp_qat_hw_cipher_algo_blk cipher;
struct icp_qat_hw_auth_algo_blk hash;
} __rte_packed __rte_cache_aligned;
-struct qat_session {
+struct qat_sym_session {
enum icp_qat_fw_la_cmd_id qat_cmd;
enum icp_qat_hw_cipher_algo qat_cipher_alg;
enum icp_qat_hw_cipher_dir qat_dir;
enum icp_qat_hw_auth_algo qat_hash_alg;
enum icp_qat_hw_auth_op auth_op;
void *bpi_ctx;
- struct qat_alg_cd cd;
+ struct qat_sym_cd cd;
uint8_t *cd_cur_ptr;
phys_addr_t cd_paddr;
struct icp_qat_fw_la_bulk_req fw_req;
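The fw_req member caches a firmware request pre-built at session-init time, so the hot path can start from this template and only patch per-operation fields. A sketch of that idea, assuming the bulk request is 128 bytes:

#include <rte_memcpy.h>

/* Sketch only: copy the per-session request template into the ring slot,
 * then let qat_sym_build_request() fill the per-op fields (addresses,
 * lengths, IV/digest pointers, opaque data). Assumes
 * sizeof(struct icp_qat_fw_la_bulk_req) == 128. */
static void
start_request_from_template(uint8_t *out_msg,
		const struct qat_sym_session *ctx)
{
	struct icp_qat_fw_la_bulk_req *qat_req =
			(struct icp_qat_fw_la_bulk_req *)out_msg;

	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&ctx->fw_req);
}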
int
qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
+ struct qat_sym_session *session);
int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
+ struct qat_sym_session *session);
int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
+ struct qat_sym_session *session);
int
-qat_sym_session_aead_create_cd_cipher(struct qat_session *cd,
+qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd,
uint8_t *enckey,
uint32_t enckeylen);
int
-qat_sym_session_aead_create_cd_auth(struct qat_session *cdesc,
+qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
uint32_t aad_length,
void
qat_sym_sesssion_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags);
+ enum qat_sym_proto_flag proto_flags);
int
qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int
uint8_t cryptodev_qat_driver_id;
-static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = {
+static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
QAT_BASE_GEN1_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = {
+static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
QAT_BASE_GEN1_SYM_CAPABILITIES,
QAT_EXTRA_GEN2_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
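The generation-specific capability array selected in the probe-time switch below is what applications eventually see through the device info query. A sketch of how it would typically be exposed (callback name assumed):

#include <rte_cryptodev_pmd.h>

/* Sketch only: surface the per-generation capability array picked at probe
 * time through the standard device-info callback. */
static void
qat_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	if (info != NULL) {
		info->driver_id = cryptodev_qat_driver_id;
		info->capabilities = internals->qat_dev_capabilities;
	}
}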
switch (pci_dev->id.device_id) {
case 0x0443:
internals->qat_dev_gen = QAT_GEN1;
- internals->qat_dev_capabilities = qat_gen1_capabilities;
+ internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
break;
case 0x37c9:
case 0x19e3:
case 0x6f55:
internals->qat_dev_gen = QAT_GEN2;
- internals->qat_dev_capabilities = qat_gen2_capabilities;
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
break;
default:
PMD_DRV_LOG(ERR,