crypto/qat: make dequeue function generic
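
The enqueue and dequeue paths no longer call the symmetric-crypto
helpers directly. qat_enqueue_op_burst() and qat_dequeue_op_burst()
now operate on opaque void ** op arrays and invoke per-queue-pair
callbacks (build_request and process_response), so the same
ring-handling code can be reused by other QAT services. The symmetric
PMD keeps thin rte_cryptodev-facing wrappers,
qat_sym_pmd_enqueue_op_burst() and qat_sym_pmd_dequeue_op_burst(),
which cast and forward to the generic functions.

A minimal sketch of the callback signatures implied by this patch;
the typedef names and the struct qat_qp fragment below are
illustrative only, the real queue-pair definition lives outside this
file:

    /* Illustrative typedefs matching qat_sym_build_request() and
     * qat_sym_process_response() as changed in this patch. */
    typedef int (*qat_build_request_t)(void *in_op, uint8_t *out_msg,
                    void *op_cookie, enum qat_device_gen qat_dev_gen);

    typedef int (*qat_process_response_t)(void **op, uint8_t *resp,
                    void *op_cookie, enum qat_device_gen qat_dev_gen);

    struct qat_qp {
            ...
            qat_build_request_t build_request;        /* e.g. qat_sym_build_request */
            qat_process_response_t process_response;  /* e.g. qat_sym_process_response */
            enum qat_device_gen qat_dev_gen;
            ...
    };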
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index ae521c2..2bae913 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -86,12 +86,8 @@ cipher_decrypt_err:
 static inline uint32_t
 adf_modulo(uint32_t data, uint32_t shift);
 
-static inline int
-qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
-               struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
-
 static inline uint32_t
-qat_bpicipher_preprocess(struct qat_session *ctx,
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
                                struct rte_crypto_op *op)
 {
        int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
@@ -146,7 +142,7 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
 }
 
 static inline uint32_t
-qat_bpicipher_postprocess(struct qat_session *ctx,
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
                                struct rte_crypto_op *op)
 {
        int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
@@ -209,14 +205,12 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
        q->csr_tail = q->tail;
 }
 
-uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-               uint16_t nb_ops)
+static uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 {
        register struct qat_queue *queue;
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
        register uint32_t nb_ops_sent = 0;
-       register struct rte_crypto_op **cur_op = ops;
        register int ret;
        uint16_t nb_ops_possible = nb_ops;
        register uint8_t *base_addr;
@@ -242,8 +236,9 @@ qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
        }
 
        while (nb_ops_sent != nb_ops_possible) {
-               ret = qat_sym_build_request(*cur_op, base_addr + tail,
-                       tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
+               ret = tmp_qp->build_request(*ops, base_addr + tail,
+                               tmp_qp->op_cookies[tail / queue->msg_size],
+                               tmp_qp->qat_dev_gen);
                if (ret != 0) {
                        tmp_qp->stats.enqueue_err_count++;
                        /*
@@ -257,8 +252,8 @@ qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                }
 
                tail = adf_modulo(tail + queue->msg_size, queue->modulo);
+               ops++;
                nb_ops_sent++;
-               cur_op++;
        }
 kick_tail:
        queue->tail = tail;
@@ -299,68 +294,97 @@ void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
 }
 
 uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
+{
+       return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+int
+qat_sym_process_response(void **op, uint8_t *resp,
+               __rte_unused void *op_cookie,
+               __rte_unused enum qat_device_gen qat_dev_gen)
+{
+
+       struct icp_qat_fw_comn_resp *resp_msg =
+                       (struct icp_qat_fw_comn_resp *)resp;
+       struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+                       (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+       rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+                       sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+       if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+                       ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+                       resp_msg->comn_hdr.comn_status)) {
+
+               rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+       } else {
+               struct qat_sym_session *sess = (struct qat_sym_session *)
+                                               get_session_private_data(
+                                               rx_op->sym->session,
+                                               cryptodev_qat_driver_id);
+
+               if (sess->bpi_ctx)
+                       qat_bpicipher_postprocess(sess, rx_op);
+               rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+       }
+       *op = (void *)rx_op;
+
+       return 0;
+}
+
+static uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 {
        struct qat_queue *rx_queue, *tx_queue;
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-       uint32_t msg_counter = 0;
-       struct rte_crypto_op *rx_op;
-       struct icp_qat_fw_comn_resp *resp_msg;
        uint32_t head;
+       uint32_t resp_counter = 0;
+       uint8_t *resp_msg;
 
        rx_queue = &(tmp_qp->rx_q);
        tx_queue = &(tmp_qp->tx_q);
        head = rx_queue->head;
-       resp_msg = (struct icp_qat_fw_comn_resp *)
-                       ((uint8_t *)rx_queue->base_addr + head);
+       resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
 
        while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
-                       msg_counter != nb_ops) {
-               rx_op = (struct rte_crypto_op *)(uintptr_t)
-                               (resp_msg->opaque_data);
+                       resp_counter != nb_ops) {
 
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
-               rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
-                       sizeof(struct icp_qat_fw_comn_resp));
-#endif
-               if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
-                               ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
-                                       resp_msg->comn_hdr.comn_status)) {
-                       rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-               } else {
-                       struct qat_session *sess = (struct qat_session *)
-                                       get_session_private_data(
-                                       rx_op->sym->session,
-                                       cryptodev_qat_driver_id);
-
-                       if (sess->bpi_ctx)
-                               qat_bpicipher_postprocess(sess, rx_op);
-                       rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-               }
+               tmp_qp->process_response(ops, resp_msg,
+                       tmp_qp->op_cookies[head / rx_queue->msg_size],
+                       tmp_qp->qat_dev_gen);
 
                head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
-               resp_msg = (struct icp_qat_fw_comn_resp *)
-                               ((uint8_t *)rx_queue->base_addr + head);
-               *ops = rx_op;
+
+               resp_msg = (uint8_t *)rx_queue->base_addr + head;
                ops++;
-               msg_counter++;
+               resp_counter++;
        }
-       if (msg_counter > 0) {
+       if (resp_counter > 0) {
                rx_queue->head = head;
-               tmp_qp->stats.dequeued_count += msg_counter;
-               rx_queue->nb_processed_responses += msg_counter;
-               tmp_qp->inflights16 -= msg_counter;
+               tmp_qp->stats.dequeued_count += resp_counter;
+               rx_queue->nb_processed_responses += resp_counter;
+               tmp_qp->inflights16 -= resp_counter;
 
                if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
                        rxq_free_desc(tmp_qp, rx_queue);
        }
        /* also check if tail needs to be advanced */
        if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
-                       tx_queue->tail != tx_queue->csr_tail) {
+               tx_queue->tail != tx_queue->csr_tail) {
                txq_write_tail(tmp_qp, tx_queue);
        }
-       return msg_counter;
+       return resp_counter;
+}
+
+uint16_t
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+               uint16_t nb_ops)
+{
+       return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
 }
 
 static inline int
@@ -455,12 +479,13 @@ set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
                        iv_length);
 }
 
-static inline int
-qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
-               struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+               void *op_cookie, enum qat_device_gen qat_dev_gen)
 {
        int ret = 0;
-       struct qat_session *ctx;
+       struct qat_sym_session *ctx;
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        register struct icp_qat_fw_la_bulk_req *qat_req;
@@ -470,6 +495,9 @@ qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
        uint32_t min_ofs = 0;
        uint64_t src_buf_start = 0, dst_buf_start = 0;
        uint8_t do_sgl = 0;
+       struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+       struct qat_sym_op_cookie *cookie =
+                               (struct qat_sym_op_cookie *)op_cookie;
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
        if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
@@ -485,7 +513,7 @@ qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
                return -EINVAL;
        }
 
-       ctx = (struct qat_session *)get_session_private_data(
+       ctx = (struct qat_sym_session *)get_session_private_data(
                        op->sym->session, cryptodev_qat_driver_id);
 
        if (unlikely(ctx == NULL)) {
@@ -493,7 +521,7 @@ qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
                return -EINVAL;
        }
 
-       if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
+       if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
                PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
                return -EINVAL;
@@ -806,7 +834,7 @@ qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
                ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_SGL);
                ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
-                               &qat_op_cookie->qat_sgl_list_src,
+                               &cookie->qat_sgl_list_src,
                                qat_req->comn_mid.src_length);
                if (ret) {
                        PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
@@ -816,11 +844,11 @@ qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
                if (likely(op->sym->m_dst == NULL))
                        qat_req->comn_mid.dest_data_addr =
                                qat_req->comn_mid.src_data_addr =
-                               qat_op_cookie->qat_sgl_src_phys_addr;
+                               cookie->qat_sgl_src_phys_addr;
                else {
                        ret = qat_sgl_fill_array(op->sym->m_dst,
                                        dst_buf_start,
-                                       &qat_op_cookie->qat_sgl_list_dst,
+                                       &cookie->qat_sgl_list_dst,
                                                qat_req->comn_mid.dst_length);
 
                        if (ret) {
@@ -830,9 +858,9 @@ qat_sym_build_request(struct rte_crypto_op *op, uint8_t *out_msg,
                        }
 
                        qat_req->comn_mid.src_data_addr =
-                               qat_op_cookie->qat_sgl_src_phys_addr;
+                               cookie->qat_sgl_src_phys_addr;
                        qat_req->comn_mid.dest_data_addr =
-                                       qat_op_cookie->qat_sgl_dst_phys_addr;
+                                       cookie->qat_sgl_dst_phys_addr;
                }
        } else {
                qat_req->comn_mid.src_data_addr = src_buf_start;
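
With the per-service callbacks in place, the symmetric service only
has to register its handlers when a queue pair is created. A rough
sketch of that wiring follows; the setup helper shown here is
hypothetical, the real assignment happens in the queue-pair setup
path, which is not part of this hunk:

    /* Hypothetical illustration of how the symmetric service plugs
     * into the generic ring code introduced above. */
    static void
    qat_sym_qp_register_callbacks(struct qat_qp *qp)
    {
            qp->build_request = qat_sym_build_request;
            qp->process_response = qat_sym_process_response;
    }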