+/*
+ * Push a completed crypto request to the SSO so the response is
+ * delivered to the application as an event.
+ *
+ * The 64-bit add_work word packs the response event descriptor:
+ * flow_id in the low bits, RTE_EVENT_TYPE_CRYPTODEV at bit 28 and
+ * sched_type at bit 32. It is stored together with the work pointer
+ * (req) to the target group's add-work address via ssovf_store_pair().
+ */
+static __rte_always_inline void
+submit_request_to_sso(struct ssows *ws, uintptr_t req,
+ struct rte_event *rsp_info)
+{
+ uint64_t add_work;
+
+ add_work = rsp_info->flow_id | (RTE_EVENT_TYPE_CRYPTODEV << 28) |
+ ((uint64_t)(rsp_info->sched_type) << 32);
+
+ /* NOTE(review): sched_type == 0 looks like ordered scheduling --
+  * wait for this worker to reach the head of its flow before adding
+  * work. TODO confirm against ssows_head_wait() semantics.
+  */
+ if (!rsp_info->sched_type)
+ ssows_head_wait(ws);
+
+ /* Release fence: make the request's memory contents visible before
+  * the SSO can hand the new event to a consumer.
+  */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
+}
+
+/*
+ * Locate the event crypto adapter metadata attached to @op.
+ *
+ * With-session ops carry it as the session's user data; sessionless
+ * ops embed it in the op itself at private_data_offset.
+ *
+ * @return pointer to the metadata, or NULL when no metadata location
+ *         is available for this op.
+ */
+static inline union rte_event_crypto_metadata *
+get_event_crypto_mdata(struct rte_crypto_op *op)
+{
+	switch (op->sess_type) {
+	case RTE_CRYPTO_OP_WITH_SESSION:
+		return rte_cryptodev_sym_session_get_user_data(
+				op->sym->session);
+	case RTE_CRYPTO_OP_SESSIONLESS:
+		if (op->private_data_offset)
+			return (union rte_event_crypto_metadata *)
+				((uint8_t *)op + op->private_data_offset);
+		return NULL;
+	default:
+		return NULL;
+	}
+}
+
+/*
+ * Event crypto adapter enqueue entry point: submit @op to the CPT
+ * queue pair named in the op's adapter metadata, and arrange for the
+ * completed request to come back as an event through the SSO.
+ *
+ * @param port SSO worker port (passed through to submit_request_to_sso()
+ *             as a struct ssows *).
+ * @param op   Crypto operation carrying event crypto adapter metadata.
+ * @return 1 on successful submission; 0 on failure. rte_errno is set
+ *         to EINVAL when the metadata is missing or the queue pair has
+ *         not been enabled for the adapter.
+ */
+uint16_t __rte_hot
+otx_crypto_adapter_enqueue(void *port, struct rte_crypto_op *op)
+{
+ union rte_event_crypto_metadata *ec_mdata;
+ struct cpt_instance *instance;
+ struct cpt_request_info *req;
+ struct rte_event *rsp_info;
+ uint8_t op_type, cdev_id;
+ uint16_t qp_id;
+
+ ec_mdata = get_event_crypto_mdata(op);
+ if (unlikely(ec_mdata == NULL)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ /* The metadata tells us which device/queue pair to submit to and
+  * how the response event must be shaped.
+  */
+ cdev_id = ec_mdata->request_info.cdev_id;
+ qp_id = ec_mdata->request_info.queue_pair_id;
+ rsp_info = &ec_mdata->response_info;
+ instance = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+
+ if (unlikely(!instance->ca_enabled)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ op_type = op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
+ OP_TYPE_ASYM;
+ /* NOTE(review): rte_errno is not set on this failure path --
+  * presumably otx_cpt_enq_single() sets it; confirm.
+  */
+ req = otx_cpt_enq_single(instance, op,
+ &((struct cpt_vf *)instance)->pqueue, op_type);
+ if (unlikely(req == NULL))
+ return 0;
+
+ /* Kick the hardware for the single instruction just enqueued. */
+ otx_cpt_ring_dbell(instance, 1);
+ /* Record the owning queue pair in the request -- presumably read by
+  * the completion/dequeue path; verify against the response handler.
+  */
+ req->qp = instance;
+ submit_request_to_sso(port, (uintptr_t)req, rsp_info);
+
+ return 1;
+}
+