+ vf->lf_msixoff[i] = (vf->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
+ rsp->cpt1_lf_msixoff[i] : rsp->cptlf_msixoff[i];
+
+ return 0;
+}
+
+/*
+ * Send the message currently queued on mbox channel 0 to the AF/PF and
+ * block until the response arrives.
+ *
+ * @param vf	CPT VF whose device mailbox is used.
+ *
+ * @return 0 on success, negative error code if no response was received
+ *	   (otx2_mbox_wait_for_rsp() failure is logged and propagated).
+ */
+static int
+otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
+{
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	int ret;
+
+	/* Ring the doorbell for device 0 on this mailbox */
+	otx2_mbox_msg_send(mbox, 0);
+
+	ret = otx2_mbox_wait_for_rsp(mbox, 0);
+	if (ret < 0) {
+		CPT_LOG_ERR("Could not get mailbox response");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Read an AF (admin function) register through the VF->AF mailbox.
+ *
+ * A CPT_RD_WR_REGISTER request (is_write = 0) is queued, sent
+ * synchronously, and the register value is then extracted from the
+ * response message sitting in the mbox receive region.
+ *
+ * @param dev		Crypto device whose VF mailbox is used.
+ * @param reg		Register offset to read.
+ * @param blkaddr	Hardware block address the register belongs to.
+ * @param val		Output: register value taken from the response.
+ *
+ * @return 0 on success, -EFAULT if the request could not be allocated,
+ *	   negative error code if no mailbox response arrived.
+ */
+int
+otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
+		     uint8_t blkaddr, uint64_t *val)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct otx2_mbox_dev *mdev = &mbox->dev[0];
+	struct cpt_rd_wr_reg_msg *msg;
+	int ret, off;
+
+	msg = (struct cpt_rd_wr_reg_msg *)
+	      otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
+				      sizeof(*msg));
+	if (msg == NULL) {
+		CPT_LOG_ERR("Could not allocate mailbox message");
+		return -EFAULT;
+	}
+
+	msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+	msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+	msg->hdr.pcifunc = vf->otx2_dev.pf_func;
+	msg->is_write = 0;
+	msg->reg_offset = reg;
+	/*
+	 * NOTE(review): the caller's destination pointer is stashed in the
+	 * request, yet the value is also read back explicitly from the
+	 * response below — presumably ret_val is part of the AF mbox
+	 * contract; confirm against the cpt_rd_wr_reg_msg definition.
+	 */
+	msg->ret_val = val;
+	msg->blkaddr = blkaddr;
+
+	ret = otx2_cpt_send_mbox_msg(vf);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The first response message starts right after the aligned mbox
+	 * header at the beginning of the receive region.
+	 */
+	off = mbox->rx_start +
+	      RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+	msg = (struct cpt_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);
+
+	*val = msg->val;
+
+	return 0;
+}
+
+/*
+ * Write an AF (admin function) register through the VF->AF mailbox.
+ *
+ * @param dev		Crypto device whose VF mailbox is used.
+ * @param reg		Register offset to write.
+ * @param blkaddr	Hardware block address the register belongs to.
+ * @param val		Value to be written.
+ *
+ * @return 0 on success, -EFAULT if the request could not be allocated,
+ *	   negative error code if no mailbox response was received.
+ */
+int
+otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
+		      uint8_t blkaddr, uint64_t val)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct cpt_rd_wr_reg_msg *req;
+
+	req = (struct cpt_rd_wr_reg_msg *)
+	      otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), sizeof(*req));
+	if (req == NULL) {
+		CPT_LOG_ERR("Could not allocate mailbox message");
+		return -EFAULT;
+	}
+
+	/* Fill the register write request (is_write = 1) */
+	req->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+	req->hdr.pcifunc = vf->otx2_dev.pf_func;
+	req->blkaddr = blkaddr;
+	req->is_write = 1;
+	req->reg_offset = reg;
+	req->val = val;
+
+	/* Synchronous send; the response carries no payload we need */
+	return otx2_cpt_send_mbox_msg(vf);
+}
+
+/*
+ * Configure the RX inline IPsec LF: inform the AF of the SSO PF function
+ * that should be bound to inline CPT processing.
+ *
+ * @param dev	Crypto device whose VF mailbox is used.
+ *
+ * @return 0 on success, -EFAULT if the mailbox message could not be
+ *	   allocated, -EIO on mailbox processing failure.
+ */
+int
+otx2_cpt_inline_init(const struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct cpt_rx_inline_lf_cfg_msg *msg;
+	int ret;
+
+	msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
+	/* Fix: the allocation was dereferenced unchecked; fail like the
+	 * other mbox helpers in this file do.
+	 */
+	if (msg == NULL) {
+		CPT_LOG_ERR("Could not allocate mailbox message");
+		return -EFAULT;
+	}
+
+	msg->sso_pf_func = otx2_sso_pf_func_get();
+
+	otx2_mbox_msg_send(mbox, 0);
+	ret = otx2_mbox_process(mbox);
+	if (ret < 0)
+		return -EIO;
+
+	return 0;
+}
+
+int
+otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
+ uint16_t port_id)
+{
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct cpt_inline_ipsec_cfg_msg *msg;
+ struct otx2_eth_dev *otx2_eth_dev;
+ int ret;
+
+ if (!otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
+ return -EINVAL;
+
+ otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);
+
+ msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
+ msg->dir = CPT_INLINE_OUTBOUND;
+ msg->enable = 1;
+ msg->slot = qp->id;
+
+ msg->nix_pf_func = otx2_eth_dev->pf_func;
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_process(mbox);
+ if (ret < 0)
+ return -EIO;