/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
*/
#include <rte_common.h>
}
uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+ __rte_unused qat_op_build_request_t op_build_request,
+ void **ops, uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
}
uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+ __rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+ uint16_t nb_ops)
{
struct qat_queue *rx_queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
*/
#ifndef _QAT_QP_H_
#define _QAT_QP_H_
/* number of responses processed since last CSR head write */
};
+/**
+ * Type definition of the qat_op_build_request_t function pointer, passed
+ * as an argument to the enqueue op burst, where a build request function
+ * is assigned based on the type of crypto op.
+ *
+ * @param in_op
+ * An input op pointer
+ * @param out_msg
+ * out_msg pointer
+ * @param op_cookie
+ * op cookie pointer
+ * @param opaque
+ * Opaque data that may be used to store context between two
+ * enqueue operations
+ * @param dev_gen
+ * QAT device generation ID
+ * @return
+ * - 0 if the crypto request is built successfully,
+ * - EINVAL on error
+ */
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+ void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
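For illustration, a callback matching this typedef might look like the minimal sketch below. The name dummy_op_build_request and the struct dummy_fw_req layout are hypothetical, standing in for a generation-specific firmware request; a real implementation fills the actual request descriptor in the ring slot.

/* Hypothetical callback matching qat_op_build_request_t. The request
 * layout (struct dummy_fw_req) is an assumption for illustration. */
struct dummy_fw_req {
	uint64_t opaque_data;	/* echoed back in the response */
	uint8_t payload[56];
};

static int
dummy_op_build_request(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
{
	struct rte_crypto_op *op = in_op;
	struct dummy_fw_req *req = (struct dummy_fw_req *)out_msg;

	RTE_SET_USED(op_cookie);
	RTE_SET_USED(dev_gen);

	if (op->sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	/* Remember the session across calls via the per-qp opaque slot,
	 * e.g. to skip re-deriving state for back-to-back ops on the
	 * same session. */
	opaque[0] = (uintptr_t)op->sym->session;

	memset(req, 0, sizeof(*req));
	/* Store the op pointer so the dequeue side can recover it. */
	req->opaque_data = (uintptr_t)op;
	return 0;
}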
+/**
+ * Type definition of the qat_op_dequeue_t function pointer, passed
+ * as an argument to the dequeue op burst, where a dequeue op function
+ * is assigned based on the type of crypto op.
+ *
+ * @param op
+ * An input op pointer
+ * @param resp
+ * QAT response message pointer
+ * @param op_cookie
+ * op cookie pointer
+ * @param dequeue_err_count
+ * dequeue error counter
+ * @return
+ * - 0 if the op is dequeued successfully
+ * - EINVAL on error
+ */
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+ uint64_t *dequeue_err_count __rte_unused);
+
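Correspondingly, a dequeue-side callback matching qat_op_dequeue_t could be sketched as follows; struct dummy_fw_resp and its status field are assumptions mirroring the hypothetical request above, not the real generation-specific response format.

/* Hypothetical response layout matching dummy_fw_req above. */
struct dummy_fw_resp {
	uint64_t opaque_data;	/* op pointer stored at build time */
	uint8_t status;		/* 0 == success, by assumption */
};

static int
dummy_dequeue_process_response(void **op, uint8_t *resp,
		void *op_cookie, uint64_t *dequeue_err_count)
{
	struct dummy_fw_resp *resp_msg = (struct dummy_fw_resp *)resp;
	struct rte_crypto_op *rx_op =
		(struct rte_crypto_op *)(uintptr_t)resp_msg->opaque_data;

	RTE_SET_USED(op_cookie);

	if (resp_msg->status == 0) {
		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		(*dequeue_err_count)++;
	}
	*op = rx_op;
	return 0;
}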
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE 2
+
struct qat_qp {
void *mmap_bar_addr;
struct qat_queue tx_q;
struct rte_mempool *op_cookie_pool;
void **op_cookies;
uint32_t nb_descriptors;
+ uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
enum qat_device_gen qat_dev_gen;
enum qat_service_type service_type;
struct qat_pci_device *qat_dev;
};
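The new per-qp opaque array gives build callbacks QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE slots of state that persist across ops on the same queue pair. A simplified sketch of how the slots would reach the callback inside the enqueue loop follows; ring-full checks and cookie indexing are omitted, and the qat_queue field names only loosely mirror the driver's.

/* Simplified sketch of the enqueue loop handing the per-qp opaque
 * slots to the build callback; not the real loop. */
static uint16_t
dummy_enqueue_loop(struct qat_qp *tmp_qp, qat_op_build_request_t build,
		void **ops, uint16_t nb_ops)
{
	struct qat_queue *queue = &tmp_qp->tx_q;
	uint8_t *base_addr = queue->base_addr;
	uint32_t tail = queue->tail;
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		if (build(ops[i], base_addr + tail,
				tmp_qp->op_cookies[i], tmp_qp->opaque,
				tmp_qp->qat_dev_gen) != 0)
			break;	/* leave remaining ops unenqueued */
		tail = (tail + queue->msg_size) & queue->modulo_mask;
	}
	queue->tail = tail;
	return i;
}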
uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+ void **ops, uint16_t nb_ops);
uint16_t
qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+ qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
int
qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
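With these prototypes, each service PMD wraps the common burst functions and supplies its own callbacks. In this patch the PMDs still pass NULL (the parameters are __rte_unused), so the following wiring, using the dummy callbacks sketched earlier, is only a hypothetical picture of the intended usage:

static uint16_t
dummy_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, dummy_op_build_request,
			(void **)ops, nb_ops);
}

static uint16_t
dummy_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops,
			dummy_dequeue_process_response, nb_ops);
}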
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
*/
#include <rte_malloc.h>
qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
uint16_t nb_ops)
{
- uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
if (ret) {
} else {
tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
(compressdev_dequeue_pkt_burst_t)
- qat_dequeue_op_burst;
+ qat_comp_pmd_enq_deq_dummy_op_burst;
}
}
return ret;
uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+ return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
}
uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
}
/* An rte_driver is needed in the registration of both the device and the driver
qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+ return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
}
static uint16_t
qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
}
/* An rte_driver is needed in the registration of both the device and the driver
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
*/
#ifndef _QAT_SYM_SESSION_H_
#define _QAT_SYM_SESSION_H_
QAT_CRYPTO_PROTO_FLAG_ZUC = 4
};
+struct qat_sym_session;
+
+/*
+ * Type definition of the qat_sym_build_request_t function pointer,
+ * passed as an argument to the enqueue op burst, where a build request
+ * function is assigned based on the type of crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+ uint8_t *out_msg, void *op_cookie);
+
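Unlike qat_op_build_request_t, this symmetric-crypto variant receives the resolved session context directly. A minimal hypothetical example (the helper name and body are illustrative only):

/* Hypothetical callback matching qat_sym_build_request_t. */
static int
dummy_sym_build_request(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	struct rte_crypto_op *op = in_op;

	RTE_SET_USED(op_cookie);

	/* A real implementation uses ctx (keys, proto flags, content
	 * descriptor) to populate the firmware request. */
	if (ctx == NULL || op->sym == NULL)
		return -EINVAL;

	memset(out_msg, 0, 64);	/* assumed request size */
	return 0;
}

Per-generation session setup could then store such a routine in the session's build_request array below, e.g. ctx->build_request[0] = dummy_sym_build_request; the meaning of the two array slots is left to the generation-specific code and is not shown in this patch.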
/* Common content descriptor */
struct qat_sym_cd {
struct icp_qat_hw_cipher_algo_blk cipher;
/* Some generations need different setup of counter */
uint32_t slice_types;
enum qat_sym_proto_flag qat_proto_flag;
+ qat_sym_build_request_t build_request[2];
};
int