+/* DQRR callback for PARALLEL-scheduled event crypto completions.
+ *
+ * Translates a SEC frame descriptor pulled from the portal into a
+ * completed rte_crypto_op and fills in the rte_event from the outq's
+ * stored event template. The DQRR entry is fully consumed here
+ * (qman_cb_dqrr_consume) — no atomic hold-active bookkeeping.
+ *
+ * @event: rte_event to populate for the app (cast from void *)
+ * @qm:    portal (unused)
+ * @outq:  out FQ carrying the event template configured at attach time
+ * @dqrr:  dequeued DQRR entry holding the SEC output FD
+ * @bufs:  out parameter; receives the completed crypto op
+ */
+static enum qman_cb_dqrr_result
+dpaa_sec_process_parallel_event(void *event,
+			struct qman_portal *qm __rte_unused,
+			struct qman_fq *outq,
+			const struct qm_dqrr_entry *dqrr,
+			void **bufs)
+{
+	const struct qm_fd *fd;
+	struct dpaa_sec_job *job;
+	struct dpaa_sec_op_ctx *ctx;
+	struct rte_event *ev = (struct rte_event *)event;
+
+	fd = &dqrr->fd;
+
+	/* sg is embedded in an op ctx,
+	 * sg[0] is for output
+	 * sg[1] for input
+	 */
+	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		/* Protocol offload may change the packet size (e.g. added
+		 * headers/trailers); propagate SEC's output length to mbuf.
+		 */
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
+	if (!ctx->fd_status) {
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	} else {
+		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	}
+	/* Build the event from the template saved on the out FQ at
+	 * eventq attach time; payload is the finished crypto op.
+	 */
+	ev->event_ptr = (void *)ctx->op;
+
+	ev->flow_id = outq->ev.flow_id;
+	ev->sub_event_type = outq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = outq->ev.sched_type;
+	ev->queue_id = outq->ev.queue_id;
+	ev->priority = outq->ev.priority;
+	*bufs = (void *)ctx->op;
+
+	/* ctx->op was saved above; the context itself can go back now. */
+	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+	return qman_cb_dqrr_consume;
+}
+
+/* DQRR callback for ATOMIC-scheduled event crypto completions.
+ *
+ * Same FD -> crypto-op translation as the parallel variant, but the
+ * DQRR entry is NOT consumed here: with HOLDACTIVE set the entry must
+ * stay held until the application releases the atomic context, so we
+ * record it in the per-lcore held-DQRR state and return
+ * qman_cb_dqrr_defer.
+ */
+static enum qman_cb_dqrr_result
+dpaa_sec_process_atomic_event(void *event,
+			struct qman_portal *qm __rte_unused,
+			struct qman_fq *outq,
+			const struct qm_dqrr_entry *dqrr,
+			void **bufs)
+{
+	u8 index;
+	const struct qm_fd *fd;
+	struct dpaa_sec_job *job;
+	struct dpaa_sec_op_ctx *ctx;
+	struct rte_event *ev = (struct rte_event *)event;
+
+	fd = &dqrr->fd;
+
+	/* sg is embedded in an op ctx,
+	 * sg[0] is for output
+	 * sg[1] for input
+	 */
+	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		/* Protocol offload may change the packet size; propagate
+		 * SEC's output length to the mbuf.
+		 */
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
+	if (!ctx->fd_status) {
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	} else {
+		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	}
+	/* Fill the event from the template stored on the out FQ at
+	 * attach time; payload is the finished crypto op.
+	 */
+	ev->event_ptr = (void *)ctx->op;
+	ev->flow_id = outq->ev.flow_id;
+	ev->sub_event_type = outq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = outq->ev.sched_type;
+	ev->queue_id = outq->ev.queue_id;
+	ev->priority = outq->ev.priority;
+
+	/* Save active dqrr entries */
+	/* DQRR entries are 64 bytes each in a 16-entry ring, so bits
+	 * [9:6] of the entry address give its ring index.
+	 */
+	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
+	DPAA_PER_LCORE_DQRR_SIZE++;
+	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
+	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
+	/* index + 1 so that 0 can presumably mean "no held entry" —
+	 * consumed later when the app releases the atomic context.
+	 */
+	ev->impl_opaque = index + 1;
+	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
+	*bufs = (void *)ctx->op;
+
+	/* ctx->op was saved above; the context itself can go back now. */
+	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+	/* Defer: keep the DQRR entry held for atomic scheduling. */
+	return qman_cb_dqrr_defer;
+}
+
+/* Attach a dpaa_sec queue pair's out FQ to an eventdev channel.
+ *
+ * Re-initializes the qp's out frame queue so completions are delivered
+ * to @ch_id and dispatched through the scheduling-type-specific DQRR
+ * callback. The caller's event is saved on the FQ as a template for
+ * the callbacks to copy into each completion event.
+ *
+ * @dev:   cryptodev owning the queue pair
+ * @qp_id: queue pair index
+ * @ch_id: qman channel to deliver completions on
+ * @event: event template (flow id, sched type, queue id, priority)
+ *
+ * Return: 0 on success, -1 for unsupported ORDERED scheduling, or the
+ * qman_init_fq() error code.
+ * NOTE(review): -1 here is not a negative errno like -ENOTSUP —
+ * confirm what callers expect.
+ */
+int
+dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
+		int qp_id,
+		uint16_t ch_id,
+		const struct rte_event *event)
+{
+	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
+	struct qm_mcc_initfq opts = {0};
+
+	int ret;
+
+	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+	opts.fqd.dest.channel = ch_id;
+
+	switch (event->sched_type) {
+	case RTE_SCHED_TYPE_ATOMIC:
+		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
+		 * configuration with HOLD_ACTIVE setting
+		 */
+		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
+		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
+		break;
+	case RTE_SCHED_TYPE_ORDERED:
+		/* NOTE(review): DPAA_SEC_ERR may already append a newline;
+		 * the trailing "\n" here could double it — verify macro.
+		 */
+		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
+		return -1;
+	default:
+		/* PARALLEL: allow portal to avoid blocking on this FQ. */
+		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
+		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
+		break;
+	}
+
+	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
+	if (unlikely(ret)) {
+		DPAA_SEC_ERR("unable to init caam source fq!");
+		return ret;
+	}
+
+	/* Save the template the DQRR callbacks copy per completion. */
+	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
+
+	return 0;
+}
+
+/* Detach a dpaa_sec queue pair's out FQ from the eventdev.
+ *
+ * Restores the default (non-eventdev) dequeue/ERN callbacks, then
+ * retires and takes the FQ out of service before re-initializing it
+ * unscheduled (no QMAN_INITFQ_FLAG_SCHED), reverting the attach.
+ *
+ * @dev:   cryptodev owning the queue pair
+ * @qp_id: queue pair index
+ *
+ * Return: 0 on success, else the qman_init_fq() error code.
+ */
+int
+dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
+			int qp_id)
+{
+	struct qm_mcc_initfq opts = {0};
+	int ret;
+	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
+
+	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
+	qp->outq.cb.ern = ern_sec_fq_handler;
+	/* FQ must be retired and out-of-service before re-init. */
+	qman_retire_fq(&qp->outq, NULL);
+	qman_oos_fq(&qp->outq);
+	ret = qman_init_fq(&qp->outq, 0, &opts);
+	if (ret)
+		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
+	/* Clear the dequeue callback so no stale handler fires. */
+	qp->outq.cb.dqrr = NULL;
+
+	return ret;
+}
+