+ sess = (struct cpt_asym_sess_misc *)
+ asym_op->session->sess_private_data;
+
+ /* Store phys_addr of the mdata to meta_buf */
+ params.meta_buf = rte_mempool_virt2iova(mdata);
+
+ cop = mdata;
+ cop[0] = (uintptr_t)mdata;
+ cop[1] = (uintptr_t)op;
+ cop[2] = cop[3] = 0ULL;
+
+ params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
+ params.req->op = cop;
+
+ /* Adjust meta_buf by crypto_op data and request_info struct */
+ params.meta_buf += (4 * sizeof(uintptr_t)) +
+ sizeof(struct cpt_request_info);
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ ret = cpt_modex_prep(&params, &sess->mod_ctx);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ ret = cpt_enqueue_rsa_op(op, &params, sess);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+ ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx_fpm_iova);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
+ sess->ec_ctx.curveid);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ rte_errno = EINVAL;
+ goto req_fail;
+ }
+
+ req = otx_cpt_request_enqueue(instance, params.req, sess->cpt_inst_w7);
+ if (unlikely(req == NULL)) {
+ CPT_LOG_DP_ERR("Could not enqueue crypto req");
+ goto req_fail;
+ }
+
+ return req;
+
+req_fail:
+ free_op_meta(mdata, minfo->pool);
+
+ return NULL;
+}
+
+/*
+ * Enqueue a single session-based symmetric crypto op to the hardware.
+ *
+ * Looks up the driver-private session state, prepares the instruction
+ * (cipher path via fill_fc_params(), auth-only path via
+ * fill_digest_params()), and submits it with otx_cpt_request_enqueue().
+ *
+ * Returns the enqueued request handle, or NULL on failure.  On a
+ * preparation failure the error is logged; on an enqueue failure the
+ * meta buffer allocated during preparation is released here.
+ */
+static __rte_always_inline void * __rte_hot
+otx_cpt_enq_single_sym(struct cpt_instance *instance,
+ struct rte_crypto_op *op)
+{
+ struct cpt_sess_misc *sess;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct cpt_request_info *prep_req;
+ void *mdata = NULL;
+ int ret = 0;
+ void *req;
+ uint64_t cpt_op;
+
+ /* Driver-private session data stored under this PMD's driver id */
+ sess = (struct cpt_sess_misc *)
+ get_sym_session_private_data(sym_op->session,
+ otx_cryptodev_driver_id);
+
+ cpt_op = sess->cpt_op;
+
+ /* Cipher (possibly cipher+auth) vs. auth-only preparation paths */
+ if (likely(cpt_op & CPT_OP_CIPHER_MASK))
+ ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
+ (void **)&prep_req);
+ else
+ ret = fill_digest_params(op, sess, &instance->meta_info,
+ &mdata, (void **)&prep_req);
+
+ if (unlikely(ret)) {
+ CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
+ "ret 0x%x", op, (unsigned int)cpt_op, ret);
+ return NULL;
+ }
+
+ /* Enqueue prepared instruction to h/w */
+ req = otx_cpt_request_enqueue(instance, prep_req, sess->cpt_inst_w7);
+ if (unlikely(req == NULL))
+ /* Buffer allocated for request preparation need to be freed */
+ free_op_meta(mdata, instance->meta_info.pool);
+
+ return req;
+}
+
+/*
+ * Enqueue a single sessionless symmetric crypto op.
+ *
+ * Creates a temporary session from the instance's session mempool,
+ * configures it from the op's xform chain, attaches it to the op and
+ * forwards to otx_cpt_enq_single_sym().
+ *
+ * Error unwinding is ordered: a configure failure only returns the
+ * session object to the mempool (sess_put); an enqueue failure must
+ * first clear the configured private data (priv_put) before the put.
+ * On success the temporary session is left attached to the op —
+ * presumably released on dequeue/completion; not visible here.
+ *
+ * Returns the enqueued request handle, or NULL (rte_errno set to
+ * ENOMEM when session allocation fails).
+ */
+static __rte_always_inline void * __rte_hot
+otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
+ struct rte_crypto_op *op)
+{
+ const int driver_id = otx_cryptodev_driver_id;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_cryptodev_sym_session *sess;
+ void *req;
+ int ret;
+
+ /* Create temporary session */
+ sess = rte_cryptodev_sym_session_create(instance->sess_mp);
+ if (sess == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ ret = sym_session_configure(driver_id, sym_op->xform, sess,
+ instance->sess_mp_priv);
+ if (ret)
+ goto sess_put;
+
+ sym_op->session = sess;
+
+ /* Enqueue op with the tmp session set */
+ req = otx_cpt_enq_single_sym(instance, op);
+ if (unlikely(req == NULL))
+ goto priv_put;
+
+ return req;
+
+priv_put:
+ sym_session_clear(driver_id, sess);
+sess_put:
+ rte_mempool_put(instance->sess_mp, sess);
+ return NULL;
+}
+
+#define OP_TYPE_SYM 0
+#define OP_TYPE_ASYM 1
+
+/*
+ * Dispatch a single crypto op to the symmetric or asymmetric enqueue
+ * path based on the compile-time op_type (OP_TYPE_SYM / OP_TYPE_ASYM).
+ *
+ * Symmetric ops support both session-based and sessionless modes;
+ * asymmetric ops are only accepted with a session — a sessionless
+ * asymmetric op (and any unknown op_type) falls through to the
+ * unsupported path, which sets rte_errno to ENOTSUP and returns NULL.
+ *
+ * Returns the enqueued request handle, or NULL on failure.
+ */
+static __rte_always_inline void *__rte_hot
+otx_cpt_enq_single(struct cpt_instance *inst,
+ struct rte_crypto_op *op,
+ const uint8_t op_type)
+{
+ /* Check for the type */
+
+ if (op_type == OP_TYPE_SYM) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ return otx_cpt_enq_single_sym(inst, op);
+ else
+ return otx_cpt_enq_single_sym_sessless(inst, op);
+ }
+
+ if (op_type == OP_TYPE_ASYM) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ return otx_cpt_enq_single_asym(inst, op);
+ }
+
+ /* Should not reach here */
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+static __rte_always_inline uint16_t __rte_hot
+otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
+ const uint8_t op_type)
+{
+ struct cpt_instance *instance = (struct cpt_instance *)qptr;
+ uint16_t count, free_slots;
+ void *req;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct pending_queue *pqueue = &cptvf->pqueue;
+
+ free_slots = pending_queue_free_slots(pqueue, DEFAULT_CMD_QLEN,
+ DEFAULT_CMD_QRSVD_SLOTS);
+ if (nb_ops > free_slots)
+ nb_ops = free_slots;
+
+ count = 0;
+ while (likely(count < nb_ops)) {
+
+ /* Enqueue single op */
+ req = otx_cpt_enq_single(instance, ops[count], op_type);
+
+ if (unlikely(req == NULL))