X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Focteontx2%2Fotx2_cryptodev_mbox.c;h=812515fc1b0c62728b8754bca05d33c118a4bde1;hb=520e3f4888c508dad32da1d8c5486a7be9b0fbba;hp=b54e4075edc9b4bf8682ccf9472f947692afabac;hpb=556a273e78b8f2029f0099c8b38f229633a03ebc;p=dpdk.git

diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
index b54e4075ed..812515fc1b 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
@@ -2,14 +2,47 @@
  * Copyright (C) 2019 Marvell International Ltd.
  */
 #include <rte_cryptodev.h>
+#include <rte_ethdev.h>
 
 #include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
 #include "otx2_cryptodev_mbox.h"
 #include "otx2_dev.h"
+#include "otx2_ethdev.h"
+#include "otx2_sec_idev.h"
 #include "otx2_mbox.h"
 
 #include "cpt_pmd_logs.h"
 
+int
+otx2_cpt_hardware_caps_get(const struct rte_cryptodev *dev,
+			   union cpt_eng_caps *hw_caps)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_dev *otx2_dev = &vf->otx2_dev;
+	struct cpt_caps_rsp_msg *rsp;
+	int ret;
+
+	otx2_mbox_alloc_msg_cpt_caps_get(otx2_dev->mbox);
+
+	ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
+	if (ret)
+		return -EIO;
+
+	if (rsp->cpt_pf_drv_version != OTX2_CPT_PMD_VERSION) {
+		otx2_err("Incompatible CPT PMD version"
+			 "(Kernel: 0x%04x DPDK: 0x%04x)",
+			 rsp->cpt_pf_drv_version, OTX2_CPT_PMD_VERSION);
+		return -EPIPE;
+	}
+
+	vf->cpt_revision = rsp->cpt_revision;
+	memcpy(hw_caps, rsp->eng_caps,
+	       sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
+
+	return 0;
+}
+
 int
 otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
 			      uint16_t *nb_queues)
@@ -25,7 +58,7 @@ otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
 	if (ret)
 		return -EIO;
 
-	*nb_queues = rsp->cpt;
+	*nb_queues = rsp->cpt + rsp->cpt1;
 	return 0;
 }
 
@@ -34,20 +67,44 @@ otx2_cpt_queues_attach(const struct rte_cryptodev *dev,
 		       uint8_t nb_queues)
 {
 	struct otx2_cpt_vf *vf = dev->data->dev_private;
 	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	int blkaddr[OTX2_CPT_MAX_BLKS];
 	struct rsrc_attach_req *req;
+	int blknum = 0;
+	int i, ret;
+
+	blkaddr[0] = RVU_BLOCK_ADDR_CPT0;
+	blkaddr[1] = RVU_BLOCK_ADDR_CPT1;
 
 	/* Ask AF to attach required LFs */
 	req = otx2_mbox_alloc_msg_attach_resources(mbox);
 
+	if ((vf->cpt_revision == OTX2_CPT_REVISION_ID_3) &&
+	    (vf->otx2_dev.pf_func & 0x1))
+		blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
+
 	/* 1 LF = 1 queue */
 	req->cptlfs = nb_queues;
-
-	if (otx2_mbox_process(mbox) < 0)
+	req->cpt_blkaddr = blkaddr[blknum];
+
+	ret = otx2_mbox_process(mbox);
+	if (ret == -ENOSPC) {
+		if (vf->cpt_revision == OTX2_CPT_REVISION_ID_3) {
+			blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
+			req->cpt_blkaddr = blkaddr[blknum];
+			if (otx2_mbox_process(mbox) < 0)
+				return -EIO;
+		} else {
+			return -EIO;
+		}
+	} else if (ret < 0) {
 		return -EIO;
+	}
 
 	/* Update number of attached queues */
 	vf->nb_queues = nb_queues;
+	for (i = 0; i < nb_queues; i++)
+		vf->lf_blkaddr[i] = req->cpt_blkaddr;
 
 	return 0;
 }
@@ -88,7 +145,8 @@ otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
 		return ret;
 
 	for (i = 0; i < vf->nb_queues; i++)
-		vf->lf_msixoff[i] = rsp->cptlf_msixoff[i];
+		vf->lf_msixoff[i] = (vf->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
+			rsp->cpt1_lf_msixoff[i] : rsp->cptlf_msixoff[i];
 
 	return 0;
 }
@@ -112,7 +170,7 @@ otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
 
 int
 otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
-		     uint64_t *val)
+		     uint8_t blkaddr, uint64_t *val)
 {
 	struct otx2_cpt_vf *vf = dev->data->dev_private;
 	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
@@ -134,6 +192,7 @@ otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
 	msg->is_write = 0;
 	msg->reg_offset = reg;
 	msg->ret_val = val;
+	msg->blkaddr = blkaddr;
 
 	ret = otx2_cpt_send_mbox_msg(vf);
 	if (ret < 0)
@@ -150,7 +209,7 @@ otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
 
 int
 otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
-		      uint64_t val)
+		      uint8_t blkaddr, uint64_t val)
 {
 	struct otx2_cpt_vf *vf = dev->data->dev_private;
 	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
@@ -170,6 +229,57 @@ otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
 	msg->is_write = 1;
 	msg->reg_offset = reg;
 	msg->val = val;
+	msg->blkaddr = blkaddr;
 
 	return otx2_cpt_send_mbox_msg(vf);
 }
+
+int
+otx2_cpt_inline_init(const struct rte_cryptodev *dev)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct cpt_rx_inline_lf_cfg_msg *msg;
+	int ret;
+
+	msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
+	msg->sso_pf_func = otx2_sso_pf_func_get();
+
+	otx2_mbox_msg_send(mbox, 0);
+	ret = otx2_mbox_process(mbox);
+	if (ret < 0)
+		return -EIO;
+
+	return 0;
+}
+
+int
+otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
+			uint16_t port_id)
+{
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct cpt_inline_ipsec_cfg_msg *msg;
+	struct otx2_eth_dev *otx2_eth_dev;
+	int ret;
+
+	if (!otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
+		return -EINVAL;
+
+	otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);
+
+	msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
+	msg->dir = CPT_INLINE_OUTBOUND;
+	msg->enable = 1;
+	msg->slot = qp->id;
+
+	msg->nix_pf_func = otx2_eth_dev->pf_func;
+
+	otx2_mbox_msg_send(mbox, 0);
+	ret = otx2_mbox_process(mbox);
+	if (ret < 0)
+		return -EIO;
+
+	return 0;
+}
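
Note: the sketch below is illustrative only and is not part of the patch. It shows one plausible order in which a device-setup path could call the mailbox helpers touched here: query engine capabilities (which also records the CPT revision used for CPT0/CPT1 block selection), count the LFs the AF can still hand out (now rsp->cpt + rsp->cpt1), attach them, and then read back their per-block MSI-X offsets. The wrapper name, its error handling, and the assumption that the included driver headers expose union cpt_eng_caps and CPT_MAX_ENG_TYPES are mine, not taken from the driver.

/* Illustrative sketch only -- assumes the octeontx2 crypto PMD's internal
 * headers (prototypes for otx2_cpt_*, union cpt_eng_caps, CPT_MAX_ENG_TYPES)
 * are on the include path.
 */
#include <errno.h>

#include <rte_cryptodev.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_mbox.h"

static int
otx2_cpt_mbox_setup_sketch(const struct rte_cryptodev *dev, uint8_t nb_queues)
{
	union cpt_eng_caps hw_caps[CPT_MAX_ENG_TYPES];
	uint16_t nb_avail = 0;
	int ret;

	/* Read engine capabilities; this also records the CPT revision that
	 * otx2_cpt_queues_attach() later uses to pick CPT0 vs CPT1.
	 */
	ret = otx2_cpt_hardware_caps_get(dev, hw_caps);
	if (ret)
		return ret;

	/* Free LF count now covers both blocks (rsp->cpt + rsp->cpt1). */
	ret = otx2_cpt_available_queues_get(dev, &nb_avail);
	if (ret)
		return ret;

	if (nb_queues > nb_avail)
		return -ENOTSUP;

	/* Attach LFs (1 LF = 1 queue); the chosen block address is stored
	 * per LF in vf->lf_blkaddr[] by the helper.
	 */
	ret = otx2_cpt_queues_attach(dev, nb_queues);
	if (ret)
		return ret;

	/* MSI-X offsets are taken from the CPT0 or CPT1 array per LF block. */
	return otx2_cpt_msix_offsets_get(dev);
}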