]> git.droids-corp.org - dpdk.git/commitdiff
common/qat: define build request and dequeue operations
authorKai Ji <kai.ji@intel.com>
Wed, 23 Feb 2022 00:49:58 +0000 (08:49 +0800)
committerAkhil Goyal <gakhil@marvell.com>
Wed, 23 Feb 2022 08:59:16 +0000 (09:59 +0100)
This patch introduces build request and dequeue op function
pointers to the qat queue pair implementation. The function
pointers are assigned during qat session generation based on the
input crypto operation request.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
drivers/common/qat/qat_qp.c
drivers/common/qat/qat_qp.h
drivers/compress/qat/qat_comp_pmd.c
drivers/crypto/qat/qat_asym_pmd.c
drivers/crypto/qat/qat_sym_pmd.c
drivers/crypto/qat/qat_sym_session.h

index 57ac8fefca3d7021d3abd91f053ea07ac9651f31..56234ca1a4f455259c10cdaffbf1d2110f2e0ee9 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -547,7 +547,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+               __rte_unused qat_op_build_request_t op_build_request,
+               void **ops, uint16_t nb_ops)
 {
        register struct qat_queue *queue;
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -814,7 +816,9 @@ kick_tail:
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+               __rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+               uint16_t nb_ops)
 {
        struct qat_queue *rx_queue;
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
index deafb407b3d0b217402f84454038b874f507d186..66f00943a544334dbadc2f28280a614ad8b2ce3f 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
        /* number of responses processed since last CSR head write */
 };
 
+/**
+ * Function pointer type qat_op_build_request_t, passed in as an argument
+ * to the enqueue op burst function; a build request is assigned based on
+ * the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context between
+ *    two enqueue operations
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+               void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Function pointer type qat_op_dequeue_t, passed in as an argument
+ * to the dequeue op burst function; a dequeue op is assigned based on
+ * the type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if the dequeue op is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+               uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE      2
+
 struct qat_qp {
        void                    *mmap_bar_addr;
        struct qat_queue        tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
        struct rte_mempool *op_cookie_pool;
        void **op_cookies;
        uint32_t nb_descriptors;
+       uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
        enum qat_device_gen qat_dev_gen;
        enum qat_service_type service_type;
        struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+               void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+               qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
index da6404c0170c2473796a7ddb2f4f0ae19c27d34f..efe5d08a9912d42a2b092585c8d1cca533fbcdc5 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
                                   uint16_t nb_ops)
 {
-       uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+       uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
        if (ret) {
@@ -639,7 +639,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
                } else {
                        tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
                                        (compressdev_dequeue_pkt_burst_t)
-                                       qat_dequeue_op_burst;
+                                       qat_comp_pmd_enq_deq_dummy_op_burst;
                }
        }
        return ret;
index addee384e341ac9bc8ac9997d90d8671cb03dfca..9a7596b227e3cc97637904c5624062dab04e7b34 100644 (file)
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                                       uint16_t nb_ops)
 {
-       return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+       return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
                                       uint16_t nb_ops)
 {
-       return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+       return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
index b835245f170c04f7501bd2ea8934d506fd507356..28a26260fbe6fde1bf277344b17c3dc77190b885 100644 (file)
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
 {
-       return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+       return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
 {
-       return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+       return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
index 6ebc176729091949b9dee00472c7e247a2eb1995..fe875a7fd026b36862a1d92c256f1690c3c33e6e 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
        QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Function pointer type qat_sym_build_request_t, passed in as an argument
+ * to the enqueue op burst function; a build request is assigned based on
+ * the type of crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+               uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
        struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
        /* Some generations need different setup of counter */
        uint32_t slice_types;
        enum qat_sym_proto_flag qat_proto_flag;
+       qat_sym_build_request_t build_request[2];
 };
 
 int