diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index f8d203e..cec20b5 100644
@@ -6,13 +6,19 @@
 
 #include <rte_cryptodev_pmd.h>
 #include <rte_errno.h>
+#include <rte_ethdev.h>
 
 #include "otx2_cryptodev.h"
 #include "otx2_cryptodev_capabilities.h"
 #include "otx2_cryptodev_hw_access.h"
 #include "otx2_cryptodev_mbox.h"
 #include "otx2_cryptodev_ops.h"
+#include "otx2_cryptodev_ops_helper.h"
+#include "otx2_ipsec_anti_replay.h"
+#include "otx2_ipsec_po_ops.h"
 #include "otx2_mbox.h"
+#include "otx2_sec_idev.h"
+#include "otx2_security.h"
 
 #include "cpt_hw_types.h"
 #include "cpt_pmd_logs.h"
@@ -22,6 +28,8 @@
 
 #define METABUF_POOL_CACHE_SIZE        512
 
+static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];
+
 /* Forward declarations */
 
 static int
@@ -125,6 +133,34 @@ otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
        meta_info->sg_mlen = 0;
 }
 
+static int
+otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
+{
+       static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
+       uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
+       int i, ret;
+
+       for (i = 0; i < nb_ethport; i++) {
+               port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
+               if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
+                       break;
+       }
+
+       if (i >= nb_ethport)
+               return 0;
+
+       ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
+       if (ret)
+               return ret;
+
+       /* Publish inline Tx QP to eth dev security */
+       ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
 static struct otx2_cpt_qp *
 otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
                   uint8_t group)
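
Note on the helper above: otx2_cpt_qp_inline_cfg() spreads inline-IPsec queue pairs across the security-capable ethdevs. The atomic port_offset is shared by every caller, so each new QP starts scanning one port past where the previous QP landed. A minimal sketch of the selection, with a hypothetical is_capable() standing in for otx2_eth_dev_is_sec_capable():

static rte_atomic16_t offset = RTE_ATOMIC16_INIT(-1);

static int
pick_sec_port(uint16_t nb_ports)
{
	uint16_t port;
	int i;

	for (i = 0; i < nb_ports; i++) {
		/* Rotate the starting point across callers */
		port = rte_atomic16_add_return(&offset, 1) % nb_ports;
		if (is_capable(port))
			return port;
	}
	return -1;	/* no capable port: caller skips inline binding */
}
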
@@ -157,7 +193,7 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
        size_div40 = (iq_len + 40 - 1) / 40 + 1;
 
        /* For pending queue */
-       len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+       len = iq_len * sizeof(uintptr_t);
 
        /* Space for instruction group memory */
        len += size_div40 * 16;
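
Sizing detail: the pending queue now stores bare uintptr_t request pointers instead of struct rid entries, and instruction-group memory costs 16 bytes per 40-instruction chunk. A worked example (the iq_len value here is illustrative, not the PMD default):

uint32_t iq_len = 8192;				/* illustrative only */
int size_div40 = (iq_len + 40 - 1) / 40 + 1;	/* 205 + 1 = 206 chunks */
size_t len = iq_len * sizeof(uintptr_t);	/* 65536 B of req pointers */
len += size_div40 * 16;				/* + 3296 B group memory */
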
@@ -194,19 +230,20 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
        }
 
        /* Initialize pending queue */
-       qp->pend_q.rid_queue = (struct rid *)va;
+       qp->pend_q.req_queue = (uintptr_t *)va;
        qp->pend_q.enq_tail = 0;
        qp->pend_q.deq_head = 0;
        qp->pend_q.pending_count = 0;
 
-       used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+       used_len = iq_len * sizeof(uintptr_t);
        used_len += size_div40 * 16;
        used_len = RTE_ALIGN(used_len, pg_sz);
        iova += used_len;
 
        qp->iq_dma_addr = iova;
        qp->id = qp_id;
-       qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
+       qp->blkaddr = vf->lf_blkaddr[qp_id];
+       qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);
 
        lmtline = vf->otx2_dev.bar2 +
                  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
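
On the address above: each LF's LMT line is mapped in BAR2, with the RVU block id placed at bit 20 and the slot at bit 12, giving a 4 KB stride per slot. A sketch of the arithmetic implied by the expression (absolute block ids and exact field widths are hardware-defined):

static inline uintptr_t
lmtline_addr(uintptr_t bar2, uint64_t blk, uint64_t slot)
{
	/* blk selects the RVU block, slot the 4 KB LMT page inside it */
	return bar2 + ((blk << 20) | (slot << 12));
}
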
@@ -216,8 +253,20 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
 
        qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);
 
+       ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+       if (ret && (ret != -ENOENT)) {
+               CPT_LOG_ERR("Could not delete inline configuration");
+               goto mempool_destroy;
+       }
+
        otx2_cpt_iq_disable(qp);
 
+       ret = otx2_cpt_qp_inline_cfg(dev, qp);
+       if (ret) {
+               CPT_LOG_ERR("Could not configure queue for inline IPsec");
+               goto mempool_destroy;
+       }
+
        ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
                                 size_div40);
        if (ret) {
@@ -243,6 +292,12 @@ otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
        char name[RTE_MEMZONE_NAMESIZE];
        int ret;
 
+       ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+       if (ret && (ret != -ENOENT)) {
+               CPT_LOG_ERR("Could not delete inline configuration");
+               return ret;
+       }
+
        otx2_cpt_iq_disable(qp);
 
        otx2_cpt_metabuf_mempool_destroy(qp);
@@ -261,25 +316,64 @@ otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
        return 0;
 }
 
+static int
+sym_xform_verify(struct rte_crypto_sym_xform *xform)
+{
+       if (xform->next) {
+               if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+                   xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+                       return -ENOTSUP;
+
+               if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+                   xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+                       return -ENOTSUP;
+
+               if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+                   xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
+                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+                   xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
+                       return -ENOTSUP;
+
+               if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+                   xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
+                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+                   xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
+                       return -ENOTSUP;
+
+       } else {
+               if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+                   xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+                   xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+                       return -ENOTSUP;
+       }
+       return 0;
+}
+
 static int
 sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
                      struct rte_cryptodev_sym_session *sess,
                      struct rte_mempool *pool)
 {
+       struct rte_crypto_sym_xform *temp_xform = xform;
        struct cpt_sess_misc *misc;
+       vq_cmd_word3_t vq_cmd_w3;
        void *priv;
        int ret;
 
-       if (unlikely(cpt_is_algo_supported(xform))) {
-               CPT_LOG_ERR("Crypto xform not supported");
-               return -ENOTSUP;
-       }
+       ret = sym_xform_verify(xform);
+       if (unlikely(ret))
+               return ret;
 
        if (unlikely(rte_mempool_get(pool, &priv))) {
                CPT_LOG_ERR("Could not allocate session private data");
                return -ENOMEM;
        }
 
+       memset(priv, 0, sizeof(struct cpt_sess_misc) +
+                       offsetof(struct cpt_ctx, mc_ctx));
+
        misc = priv;
 
        for ( ; xform != NULL; xform = xform->next) {
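
The new sym_xform_verify() above encodes the chains this hardware cannot run: auth followed by encrypt, decrypt followed by auth, 3DES-CBC paired with plain SHA1 in either order, and a lone NULL-auth verify. A usage sketch with one rejected and one accepted xform (algorithm choices are illustrative):

struct rte_crypto_sym_xform cipher = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC,
		    .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT },
};
struct rte_crypto_sym_xform auth = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		  .op = RTE_CRYPTO_AUTH_OP_GENERATE },
	.next = &cipher,
};
int ret;

ret = sym_xform_verify(&auth);		/* -ENOTSUP: auth-then-encrypt */
ret = sym_xform_verify(&cipher);	/* 0: a single cipher is allowed */
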
@@ -304,56 +398,100 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
                        goto priv_put;
        }
 
+       if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
+                       cpt_mac_len_verify(&temp_xform->auth)) {
+               CPT_LOG_ERR("MAC length is not supported");
+               ret = -ENOTSUP;
+               goto priv_put;
+       }
+
        set_sym_session_private_data(sess, driver_id, misc);
 
        misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
                             sizeof(struct cpt_sess_misc);
 
+       vq_cmd_w3.u64 = 0;
+       vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
+                                                        mc_ctx);
+
        /*
         * IE engines support IPsec operations
-        * SE engines support IPsec operations and Air-Crypto operations
+        * SE engines support IPsec operations, Chacha-Poly and
+        * Air-Crypto operations
         */
-       if (misc->zsk_flag)
-               misc->egrp = OTX2_CPT_EGRP_SE;
+       if (misc->zsk_flag || misc->chacha_poly)
+               vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
        else
-               misc->egrp = OTX2_CPT_EGRP_SE_IE;
+               vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;
+
+       misc->cpt_inst_w7 = vq_cmd_w3.u64;
 
        return 0;
 
 priv_put:
        rte_mempool_put(pool, priv);
 
-       CPT_LOG_ERR("Crypto xform not supported");
        return -ENOTSUP;
 }
 
-static void
-sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
+static __rte_always_inline void __rte_hot
+otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
+                   struct cpt_request_info *req,
+                   void *lmtline,
+                   uint64_t cpt_inst_w7)
 {
-       void *priv = get_sym_session_private_data(sess, driver_id);
-       struct rte_mempool *pool;
+       union cpt_inst_s inst;
+       uint64_t lmt_status;
 
-       if (priv == NULL)
-               return;
+       inst.u[0] = 0;
+       inst.s9x.res_addr = req->comp_baddr;
+       inst.u[2] = 0;
+       inst.u[3] = 0;
 
-       memset(priv, 0, cpt_get_session_size());
+       inst.s9x.ei0 = req->ist.ei0;
+       inst.s9x.ei1 = req->ist.ei1;
+       inst.s9x.ei2 = req->ist.ei2;
+       inst.s9x.ei3 = cpt_inst_w7;
 
-       pool = rte_mempool_from_obj(priv);
+       inst.s9x.qord = 1;
+       inst.s9x.grp = qp->ev.queue_id;
+       inst.s9x.tt = qp->ev.sched_type;
+       inst.s9x.tag = (RTE_EVENT_TYPE_CRYPTODEV << 28) |
+                       qp->ev.flow_id;
+       inst.s9x.wq_ptr = (uint64_t)req >> 3;
+       req->qp = qp;
 
-       set_sym_session_private_data(sess, driver_id, NULL);
+       do {
+               /* Copy CPT command to LMTLINE */
+               memcpy(lmtline, &inst, sizeof(inst));
+
+               /*
+                * Make sure compiler does not reorder memcpy and ldeor.
+                * LMTST transactions are always flushed from the write
+                * buffer immediately, a DMB is not required to push out
+                * LMTSTs.
+                */
+               rte_io_wmb();
+               lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
+       } while (lmt_status == 0);
 
-       rte_mempool_put(pool, priv);
 }
 
-static __rte_always_inline int32_t __hot
+static __rte_always_inline int32_t __rte_hot
 otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
                     struct pending_queue *pend_q,
-                    struct cpt_request_info *req)
+                    struct cpt_request_info *req,
+                    uint64_t cpt_inst_w7)
 {
        void *lmtline = qp->lmtline;
        union cpt_inst_s inst;
        uint64_t lmt_status;
 
+       if (qp->ca_enable) {
+               otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
+               return 0;
+       }
+
        if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
                return -EAGAIN;
 
@@ -365,7 +503,7 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
-       inst.s9x.ei3 = req->ist.ei3;
+       inst.s9x.ei3 = cpt_inst_w7;
 
        req->time_out = rte_get_timer_cycles() +
                        DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
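
Instruction word 7 (ei3) is now precomputed when the session is configured (see vq_cmd_w3 in sym_session_configure above): the engine group and the IOVA of the microcode context are folded into a single 64-bit word, so the datapath stamps sess->cpt_inst_w7 straight into the instruction instead of patching the group per packet. A sketch of the packing (the exact vq_cmd_word3_t bit layout is hardware-defined):

vq_cmd_word3_t w3;

w3.u64 = 0;
w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx, mc_ctx);
w3.s.grp = OTX2_CPT_EGRP_SE_IE;	/* SE when ZUC/SNOW or Chacha-Poly */
misc->cpt_inst_w7 = w3.u64;	/* later used verbatim as inst.s9x.ei3 */
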
@@ -380,11 +518,11 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
                 * buffer immediately, a DMB is not required to push out
                 * LMTSTs.
                 */
-               rte_cio_wmb();
+               rte_io_wmb();
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);
 
-       pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;
+       pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;
 
        /* We will use soft queue length here to limit requests */
        MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
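
The pending queue is a single-producer, single-consumer ring of request pointers per queue pair, bounded by the soft limit OTX2_CPT_DEFAULT_CMD_QLEN rather than the hardware queue depth. A sketch of the ring discipline, assuming MOD_INC is the usual wrap-around increment macro:

/* enqueue side */
pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;
MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
pend_q->pending_count++;

/* dequeue side, once the completion code is final */
req = (struct cpt_request_info *)pend_q->req_queue[pend_q->deq_head];
MOD_INC(pend_q->deq_head, OTX2_CPT_DEFAULT_CMD_QLEN);
pend_q->pending_count--;
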
@@ -393,14 +531,91 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
        return 0;
 }
 
-static __rte_always_inline int __hot
+static __rte_always_inline int32_t __rte_hot
+otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
+                     struct rte_crypto_op *op,
+                     struct pending_queue *pend_q)
+{
+       struct cpt_qp_meta_info *minfo = &qp->meta_info;
+       struct rte_crypto_asym_op *asym_op = op->asym;
+       struct asym_op_params params = {0};
+       struct cpt_asym_sess_misc *sess;
+       uintptr_t *cop;
+       void *mdata;
+       int ret;
+
+       if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
+               CPT_LOG_ERR("Could not allocate meta buffer for request");
+               return -ENOMEM;
+       }
+
+       sess = get_asym_session_private_data(asym_op->session,
+                                            otx2_cryptodev_driver_id);
+
+       /* Store IO address of the mdata to meta_buf */
+       params.meta_buf = rte_mempool_virt2iova(mdata);
+
+       cop = mdata;
+       cop[0] = (uintptr_t)mdata;
+       cop[1] = (uintptr_t)op;
+       cop[2] = cop[3] = 0ULL;
+
+       params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
+       params.req->op = cop;
+
+       /* Adjust meta_buf to point to end of cpt_request_info structure */
+       params.meta_buf += (4 * sizeof(uintptr_t)) +
+                           sizeof(struct cpt_request_info);
+       switch (sess->xfrm_type) {
+       case RTE_CRYPTO_ASYM_XFORM_MODEX:
+               ret = cpt_modex_prep(&params, &sess->mod_ctx);
+               if (unlikely(ret))
+                       goto req_fail;
+               break;
+       case RTE_CRYPTO_ASYM_XFORM_RSA:
+               ret = cpt_enqueue_rsa_op(op, &params, sess);
+               if (unlikely(ret))
+                       goto req_fail;
+               break;
+       case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+               ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx2_fpm_iova);
+               if (unlikely(ret))
+                       goto req_fail;
+               break;
+       case RTE_CRYPTO_ASYM_XFORM_ECPM:
+               ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
+                                   sess->ec_ctx.curveid);
+               if (unlikely(ret))
+                       goto req_fail;
+               break;
+       default:
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               ret = -EINVAL;
+               goto req_fail;
+       }
+
+       ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);
+
+       if (unlikely(ret)) {
+               CPT_LOG_DP_ERR("Could not enqueue crypto req");
+               goto req_fail;
+       }
+
+       return 0;
+
+req_fail:
+       free_op_meta(mdata, minfo->pool);
+
+       return ret;
+}
+
+static __rte_always_inline int __rte_hot
 otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                     struct pending_queue *pend_q)
 {
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct cpt_request_info *req;
        struct cpt_sess_misc *sess;
-       vq_cmd_word3_t *w3;
        uint64_t cpt_op;
        void *mdata;
        int ret;
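
In otx2_cpt_enqueue_asym() above, the meta buffer doubles as the response record: the first four pointer-sized slots carry the buffer and the op back to the dequeue side, the request structure follows, and the rest is microcode scratch. Layout sketch (offsets assume 8-byte pointers):

/*
 *  +0   cop[0] = mdata           freed via free_op_meta() on dequeue
 *  +8   cop[1] = op              handed back to the application
 *  +16  cop[2] = 0
 *  +24  cop[3] = 0
 *  +32  struct cpt_request_info  params.req
 *  ...  microcode scratch        params.meta_buf points here
 */
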
@@ -423,10 +638,7 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                return ret;
        }
 
-       w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
-       w3->s.grp = sess->egrp;
-
-       ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+       ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
 
        if (unlikely(ret)) {
                /* Free buffer allocated by fill params routines */
@@ -436,7 +648,79 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
        return ret;
 }
 
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
+otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
+                    struct pending_queue *pend_q)
+{
+       uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
+       struct rte_mbuf *m_src = op->sym->m_src;
+       struct otx2_sec_session_ipsec_lp *sess;
+       struct otx2_ipsec_po_sa_ctl *ctl_wrd;
+       struct otx2_ipsec_po_in_sa *sa;
+       struct otx2_sec_session *priv;
+       struct cpt_request_info *req;
+       uint64_t seq_in_sa, seq = 0;
+       uint8_t esn;
+       int ret;
+
+       priv = get_sec_session_private_data(op->sym->sec_session);
+       sess = &priv->ipsec.lp;
+       sa = &sess->in_sa;
+
+       ctl_wrd = &sa->ctl;
+       esn = ctl_wrd->esn_en;
+       winsz = sa->replay_win_sz;
+
+       if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
+               ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
+       else {
+               if (winsz) {
+                       esn_low = rte_be_to_cpu_32(sa->esn_low);
+                       esn_hi = rte_be_to_cpu_32(sa->esn_hi);
+                       seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
+                               sizeof(struct rte_ipv4_hdr) + 4);
+                       seql = rte_be_to_cpu_32(seql);
+
+                       if (!esn)
+                               seq = (uint64_t)seql;
+                       else {
+                               seqh = anti_replay_get_seqh(winsz, seql, esn_hi,
+                                               esn_low);
+                               seq = ((uint64_t)seqh << 32) | seql;
+                       }
+
+                       if (unlikely(seq == 0))
+                               return IPSEC_ANTI_REPLAY_FAILED;
+
+                       ret = anti_replay_check(sa->replay, seq, winsz);
+                       if (unlikely(ret)) {
+                               otx2_err("Anti replay check failed");
+                               return IPSEC_ANTI_REPLAY_FAILED;
+                       }
+               }
+
+               ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
+       }
+
+       if (unlikely(ret)) {
+               otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
+               return ret;
+       }
+
+       ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
+
+       if (winsz && esn) {
+               seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
+               if (seq > seq_in_sa) {
+                       sa->esn_low = rte_cpu_to_be_32(seql);
+                       sa->esn_hi = rte_cpu_to_be_32(seqh);
+               }
+       }
+
+       return ret;
+}
+
+static __rte_always_inline int __rte_hot
 otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                              struct pending_queue *pend_q)
 {
@@ -446,8 +730,8 @@ otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
        int ret;
 
        /* Create temporary session */
-
-       if (rte_mempool_get(qp->sess_mp, (void **)&sess))
+       sess = rte_cryptodev_sym_session_create(qp->sess_mp);
+       if (sess == NULL)
                return -ENOMEM;
 
        ret = sym_session_configure(driver_id, sym_op->xform, sess,
@@ -489,11 +773,18 @@ otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
        for (count = 0; count < nb_ops; count++) {
                op = ops[count];
                if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
-                       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+                       if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+                               ret = otx2_cpt_enqueue_sec(qp, op, pend_q);
+                       else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                                ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
                        else
                                ret = otx2_cpt_enqueue_sym_sessless(qp, op,
                                                                    pend_q);
+               } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+                       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+                               ret = otx2_cpt_enqueue_asym(qp, op, pend_q);
+                       else
+                               break;
                } else
                        break;
 
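The new security branch is taken for ops that the application bound to an rte_security session; rte_security_attach_session() sets op->sym->sec_session and flips sess_type for the dispatch above. Usage sketch (session creation and mbuf preparation omitted):

rte_security_attach_session(op, sec_sess);
/* op->sess_type is now RTE_CRYPTO_OP_SECURITY_SESSION, routing the
 * op to otx2_cpt_enqueue_sec() */
rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1);
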
@@ -504,11 +795,183 @@ otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
        return count;
 }
 
+static __rte_always_inline void
+otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
+                    struct rte_crypto_rsa_xform *rsa_ctx)
+{
+       struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;
+
+       switch (rsa->op_type) {
+       case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+               rsa->cipher.length = rsa_ctx->n.length;
+               memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
+               break;
+       case RTE_CRYPTO_ASYM_OP_DECRYPT:
+               if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+                       rsa->message.length = rsa_ctx->n.length;
+                       memcpy(rsa->message.data, req->rptr,
+                              rsa->message.length);
+               } else {
+                       /* Get length of decrypted output */
+                       rsa->message.length = rte_cpu_to_be_16
+                                            (*((uint16_t *)req->rptr));
+                       /*
+                        * Offset output data pointer by length field
+                        * (2 bytes) and copy decrypted data.
+                        */
+                       memcpy(rsa->message.data, req->rptr + 2,
+                              rsa->message.length);
+               }
+               break;
+       case RTE_CRYPTO_ASYM_OP_SIGN:
+               rsa->sign.length = rsa_ctx->n.length;
+               memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
+               break;
+       case RTE_CRYPTO_ASYM_OP_VERIFY:
+               if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+                       rsa->sign.length = rsa_ctx->n.length;
+                       memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
+               } else {
+                       /* Get length of signed output */
+                       rsa->sign.length = rte_cpu_to_be_16
+                                         (*((uint16_t *)req->rptr));
+                       /*
+                        * Offset output data pointer by length field
+                        * (2 bytes) and copy signed data.
+                        */
+                       memcpy(rsa->sign.data, req->rptr + 2,
+                              rsa->sign.length);
+               }
+               if (memcmp(rsa->sign.data, rsa->message.data,
+                          rsa->message.length)) {
+                       CPT_LOG_DP_ERR("RSA verification failed");
+                       cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+               }
+               break;
+       default:
+               CPT_LOG_DP_DEBUG("Invalid RSA operation type");
+               cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               break;
+       }
+}
+
+static __rte_always_inline void
+otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
+                              struct cpt_request_info *req,
+                              struct cpt_asym_ec_ctx *ec)
+{
+       int prime_len = ec_grp[ec->curveid].prime.length;
+
+       if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+               return;
+
+       /* Separate out sign r and s components */
+       memcpy(ecdsa->r.data, req->rptr, prime_len);
+       memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+              prime_len);
+       ecdsa->r.length = prime_len;
+       ecdsa->s.length = prime_len;
+}
+
+static __rte_always_inline void
+otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
+                            struct cpt_request_info *req,
+                            struct cpt_asym_ec_ctx *ec)
+{
+       int prime_len = ec_grp[ec->curveid].prime.length;
+
+       memcpy(ecpm->r.x.data, req->rptr, prime_len);
+       memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+              prime_len);
+       ecpm->r.x.length = prime_len;
+       ecpm->r.y.length = prime_len;
+}
+
+static void
+otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
+                          struct cpt_request_info *req)
+{
+       struct rte_crypto_asym_op *op = cop->asym;
+       struct cpt_asym_sess_misc *sess;
+
+       sess = get_asym_session_private_data(op->session,
+                                            otx2_cryptodev_driver_id);
+
+       switch (sess->xfrm_type) {
+       case RTE_CRYPTO_ASYM_XFORM_RSA:
+               otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
+               break;
+       case RTE_CRYPTO_ASYM_XFORM_MODEX:
+               op->modex.result.length = sess->mod_ctx.modulus.length;
+               memcpy(op->modex.result.data, req->rptr,
+                      op->modex.result.length);
+               break;
+       case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+               otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
+               break;
+       case RTE_CRYPTO_ASYM_XFORM_ECPM:
+               otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
+               break;
+       default:
+               CPT_LOG_DP_DEBUG("Invalid crypto xform type");
+               cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               break;
+       }
+}
+
+static void
+otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
+{
+       struct cpt_request_info *req = (struct cpt_request_info *)rsp[2];
+       vq_cmd_word0_t *word0 = (vq_cmd_word0_t *)&req->ist.ei0;
+       struct rte_crypto_sym_op *sym_op = cop->sym;
+       struct rte_mbuf *m = sym_op->m_src;
+       struct rte_ipv6_hdr *ip6;
+       struct rte_ipv4_hdr *ip;
+       uint16_t m_len;
+       int mdata_len;
+       char *data;
+
+       mdata_len = (int)rsp[3];
+       rte_pktmbuf_trim(m, mdata_len);
+
+       if (word0->s.opcode.major == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
+               data = rte_pktmbuf_mtod(m, char *);
+
+               if (rsp[4] == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+                       ip = (struct rte_ipv4_hdr *)(data +
+                               OTX2_IPSEC_PO_INB_RPTR_HDR);
+                       m_len = rte_be_to_cpu_16(ip->total_length);
+               } else {
+                       ip6 = (struct rte_ipv6_hdr *)(data +
+                               OTX2_IPSEC_PO_INB_RPTR_HDR);
+                       m_len = rte_be_to_cpu_16(ip6->payload_len) +
+                               sizeof(struct rte_ipv6_hdr);
+               }
+
+               m->data_len = m_len;
+               m->pkt_len = m_len;
+               m->data_off += OTX2_IPSEC_PO_INB_RPTR_HDR;
+       }
+}
+
 static inline void
 otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
                              uintptr_t *rsp, uint8_t cc)
 {
+       unsigned int sz;
+
        if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+               if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+                       if (likely(cc == OTX2_IPSEC_PO_CC_SUCCESS)) {
+                               otx2_cpt_sec_post_process(cop, rsp);
+                               cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+                       } else
+                               cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+                       return;
+               }
+
                if (likely(cc == NO_ERR)) {
                        /* Verify authentication data if required */
                        if (unlikely(rsp[2]))
@@ -526,56 +989,27 @@ otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
                if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                        sym_session_clear(otx2_cryptodev_driver_id,
                                          cop->sym->session);
+                       sz = rte_cryptodev_sym_get_existing_header_session_size(
+                                       cop->sym->session);
+                       memset(cop->sym->session, 0, sz);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }
-}
-
-static __rte_always_inline uint8_t
-otx2_cpt_compcode_get(struct cpt_request_info *req)
-{
-       volatile struct cpt_res_s_9s *res;
-       uint8_t ret;
-
-       res = (volatile struct cpt_res_s_9s *)req->completion_addr;
-
-       if (unlikely(res->compcode == CPT_9X_COMP_E_NOTDONE)) {
-               if (rte_get_timer_cycles() < req->time_out)
-                       return ERR_REQ_PENDING;
-
-               CPT_LOG_DP_ERR("Request timed out");
-               return ERR_REQ_TIMEOUT;
-       }
 
-       if (likely(res->compcode == CPT_9X_COMP_E_GOOD)) {
-               ret = NO_ERR;
-               if (unlikely(res->uc_compcode)) {
-                       ret = res->uc_compcode;
-                       CPT_LOG_DP_DEBUG("Request failed with microcode error");
-                       CPT_LOG_DP_DEBUG("MC completion code 0x%x",
-                                        res->uc_compcode);
-               }
-       } else {
-               CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);
-
-               ret = res->compcode;
-               switch (res->compcode) {
-               case CPT_9X_COMP_E_INSTERR:
-                       CPT_LOG_DP_ERR("Request failed with instruction error");
-                       break;
-               case CPT_9X_COMP_E_FAULT:
-                       CPT_LOG_DP_ERR("Request failed with DMA fault");
-                       break;
-               case CPT_9X_COMP_E_HWERR:
-                       CPT_LOG_DP_ERR("Request failed with hardware error");
-                       break;
-               default:
-                       CPT_LOG_DP_ERR("Request failed with unknown completion code");
-               }
+       if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+               if (likely(cc == NO_ERR)) {
+                       cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+                       /*
+                        * Pass cpt_req_info stored in metabuf during
+                        * enqueue.
+                        */
+                       rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
+                       otx2_cpt_asym_post_process(cop,
+                                       (struct cpt_request_info *)rsp);
+               } else
+                       cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
-
-       return ret;
 }
 
 static uint16_t
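
In the dequeue post-process above, rsp is the base of the meta buffer written at enqueue time, so the slot indices mirror the enqueue side. Word map as consumed by this patch:

/*
 *  security path:  rsp[2] = struct cpt_request_info *
 *                  rsp[3] = meta length trimmed off m_src
 *                  rsp[4] = tunnel type (RTE_SECURITY_IPSEC_TUNNEL_IPV4/6)
 *  plain sym path: rsp[2] != 0 flags pending auth-data verification
 *  asym path:      rsp + 4 pointer slots = struct cpt_request_info,
 *                  matching cop[0..3] laid down in otx2_cpt_enqueue_asym()
 */
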
@@ -587,7 +1021,6 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
        struct cpt_request_info *req;
        struct rte_crypto_op *cop;
        uint8_t cc[nb_ops];
-       struct rid *rid;
        uintptr_t *rsp;
        void *metabuf;
 
@@ -599,8 +1032,8 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
                nb_ops = nb_pending;
 
        for (i = 0; i < nb_ops; i++) {
-               rid = &pend_q->rid_queue[pend_q->deq_head];
-               req = (struct cpt_request_info *)(rid->rid);
+               req = (struct cpt_request_info *)
+                               pend_q->req_queue[pend_q->deq_head];
 
                cc[i] = otx2_cpt_compcode_get(req);
 
@@ -631,6 +1064,15 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
        return nb_completed;
 }
 
+void
+otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
+{
+       dev->enqueue_burst = otx2_cpt_enqueue_burst;
+       dev->dequeue_burst = otx2_cpt_dequeue_burst;
+
+       rte_mb();
+}
+
 /* PMD ops */
 
 static int
@@ -647,6 +1089,13 @@ otx2_cpt_dev_config(struct rte_cryptodev *dev,
 
        dev->feature_flags &= ~conf->ff_disable;
 
+       if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+               /* Initialize shared FPM table */
+               ret = cpt_fpm_init(otx2_fpm_iova);
+               if (ret)
+                       return ret;
+       }
+
        /* Unregister error interrupts */
        if (vf->err_intr_registered)
                otx2_cpt_err_intr_unregister(dev);
@@ -680,12 +1129,18 @@ otx2_cpt_dev_config(struct rte_cryptodev *dev,
                goto queues_detach;
        }
 
-       dev->enqueue_burst = otx2_cpt_enqueue_burst;
-       dev->dequeue_burst = otx2_cpt_dequeue_burst;
+       ret = otx2_cpt_inline_init(dev);
+       if (ret) {
+               CPT_LOG_ERR("Could not enable inline IPsec");
+               goto intr_unregister;
+       }
+
+       otx2_cpt_set_enqdeq_fns(dev);
 
-       rte_mb();
        return 0;
 
+intr_unregister:
+       otx2_cpt_err_intr_unregister(dev);
 queues_detach:
        otx2_cpt_queues_detach(dev);
        return ret;
@@ -704,9 +1159,10 @@ otx2_cpt_dev_start(struct rte_cryptodev *dev)
 static void
 otx2_cpt_dev_stop(struct rte_cryptodev *dev)
 {
-       RTE_SET_USED(dev);
-
        CPT_PMD_INIT_FUNC_TRACE();
+
+       if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
+               cpt_fpm_clear();
 }
 
 static int
@@ -855,6 +1311,7 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
                          struct rte_mempool *pool)
 {
        struct cpt_asym_sess_misc *priv;
+       vq_cmd_word3_t vq_cmd_w3;
        int ret;
 
        CPT_PMD_INIT_FUNC_TRACE();
@@ -875,7 +1332,12 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
                return ret;
        }
 
+       vq_cmd_w3.u64 = 0;
+       vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
+       priv->cpt_inst_w7 = vq_cmd_w3.u64;
+
        set_asym_session_private_data(sess, dev->driver_id, priv);
+
        return 0;
 }
 
@@ -914,7 +1376,6 @@ struct rte_cryptodev_ops otx2_cpt_ops = {
        .stats_reset = NULL,
        .queue_pair_setup = otx2_cpt_queue_pair_setup,
        .queue_pair_release = otx2_cpt_queue_pair_release,
-       .queue_pair_count = NULL,
 
        /* Symmetric crypto ops */
        .sym_session_get_size = otx2_cpt_sym_session_get_size,