.. note::
- Ensure that sufficient huge pages are available for your application::
+ * For the CN98xx SoC, it is recommended to use even and odd DBDF VFs together
+   to achieve higher performance, as even VFs use one crypto engine and odd
+   VFs use the other (see the usage note after this block).
- echo 8 > /sys/kernel/mm/hugepages/hugepages-524288kB/nr_hugepages
+ * Ensure that sufficient huge pages are available for your application::
- Refer to :ref:`linux_gsg_hugepages` for more details.
+ echo 8 > /sys/kernel/mm/hugepages/hugepages-524288kB/nr_hugepages
+
+ Refer to :ref:`linux_gsg_hugepages` for more details.
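
As a usage note (the BDFs here are hypothetical, not taken from this patch): on CN98xx an application can pair one even and one odd DBDF VF, for example by allowlisting ``-a 0002:20:00.1 -a 0002:20:00.2`` on the EAL command line, so that its queue pairs are spread across both crypto engines.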
Debugging Options
-----------------
* Updated the OCTEON TX2 crypto PMD lookaside protocol offload for IPsec with
ESN and anti-replay support.
+ * Updated the OCTEON TX2 crypto PMD with CN98xx support.
Removed Items
#define RVU_BLOCK_ADDR_SSOW (0x8ull)
#define RVU_BLOCK_ADDR_TIM (0x9ull)
#define RVU_BLOCK_ADDR_CPT0 (0xaull)
+#define RVU_BLOCK_ADDR_CPT1 (0xbull)
#define RVU_BLOCK_ADDR_NDC0 (0xcull)
#define RVU_BLOCK_ADDR_NDC1 (0xdull)
#define RVU_BLOCK_ADDR_NDC2 (0xeull)
/* Marvell OCTEON TX2 Crypto PMD device name */
#define CRYPTODEV_NAME_OCTEONTX2_PMD crypto_octeontx2
-#define OTX2_CPT_MAX_LFS 64
+#define OTX2_CPT_MAX_LFS 128
#define OTX2_CPT_MAX_QUEUES_PER_VF 64
+#define OTX2_CPT_MAX_BLKS 2
#define OTX2_CPT_PMD_VERSION 3
+#define OTX2_CPT_REVISION_ID_3 3
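
The doubled LF ceiling follows from the new block count. A minimal sketch of that arithmetic, assuming (as the constants suggest) that the per-VF queue limit applies to each CPT block independently:

    #include <assert.h> /* C11 static_assert */

    #define OTX2_CPT_MAX_QUEUES_PER_VF 64
    #define OTX2_CPT_MAX_BLKS          2
    #define OTX2_CPT_MAX_LFS           128

    /* CN98xx exposes two CPT blocks, so the per-VF LF ceiling doubles. */
    static_assert(OTX2_CPT_MAX_LFS ==
                  OTX2_CPT_MAX_BLKS * OTX2_CPT_MAX_QUEUES_PER_VF,
                  "LF ceiling must cover both CPT blocks");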
/**
* Device private data
/**< Number of crypto queues attached */
uint16_t lf_msixoff[OTX2_CPT_MAX_LFS];
/**< MSI-X offsets */
+ uint8_t lf_blkaddr[OTX2_CPT_MAX_LFS];
+ /**< CPT0/1 BLKADDR of LFs */
+ uint8_t cpt_revision;
+ /**< CPT revision */
uint8_t err_intr_registered:1;
/**< Are error interrupts registered? */
union cpt_eng_caps hw_caps[CPT_MAX_ENG_TYPES];
uint32_t i;
for (i = 0; i < vf->nb_queues; i++) {
- base = OTX2_CPT_LF_BAR2(vf, i);
+ base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[i], i);
otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
}
}
for (i = 0; i < vf->nb_queues; i++) {
- base = OTX2_CPT_LF_BAR2(vf, i);
+ base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[i], i);
ret = otx2_cpt_lf_err_intr_register(dev, vf->lf_msixoff[i],
base);
if (ret)
intr_unregister:
/* Unregister the ones already registered */
for (j = 0; j < i; j++) {
- base = OTX2_CPT_LF_BAR2(vf, j);
+ base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[j], j);
otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
}
/* Set engine group mask and priority */
ret = otx2_cpt_af_reg_read(dev, OTX2_CPT_AF_LF_CTL(qp->id),
- &af_lf_ctl.u);
+ qp->blkaddr, &af_lf_ctl.u);
if (ret)
return ret;
af_lf_ctl.s.grp = grp_mask;
af_lf_ctl.s.pri = pri ? 1 : 0;
ret = otx2_cpt_af_reg_write(dev, OTX2_CPT_AF_LF_CTL(qp->id),
- af_lf_ctl.u);
+ qp->blkaddr, af_lf_ctl.u);
if (ret)
return ret;
#define OTX2_CPT_AF_LF_CTL(a) (0x27000ull | (uint64_t)(a) << 3)
#define OTX2_CPT_AF_LF_CTL2(a) (0x29000ull | (uint64_t)(a) << 3)
-#define OTX2_CPT_LF_BAR2(vf, q_id) \
+#define OTX2_CPT_LF_BAR2(vf, blk_addr, q_id) \
((vf)->otx2_dev.bar2 + \
- ((RVU_BLOCK_ADDR_CPT0 << 20) | ((q_id) << 12)))
+ (((blk_addr) << 20) | ((q_id) << 12)))
#define OTX2_CPT_QUEUE_HI_PRIO 0x1
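
The reworked macro keeps the same BAR2 layout, with the RVU block address in bits 20 and up and the LF slot in bits 12 through 19; only the block is now a parameter. A self-contained sketch of that address arithmetic (constants copied from the defines above; not driver code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RVU_BLOCK_ADDR_CPT0 (0xaull)
    #define RVU_BLOCK_ADDR_CPT1 (0xbull)

    /* BAR2 offset of an LF: block address in bits [20..], slot in bits [12..19]. */
    static uint64_t lf_bar2_offset(uint64_t blk_addr, uint64_t q_id)
    {
            return (blk_addr << 20) | (q_id << 12);
    }

    int main(void)
    {
            /* Queue 3 maps to 0xa03000 on CPT0 and 0xb03000 on CPT1. */
            printf("CPT0: 0x%" PRIx64 "\n", lf_bar2_offset(RVU_BLOCK_ADDR_CPT0, 3));
            printf("CPT1: 0x%" PRIx64 "\n", lf_bar2_offset(RVU_BLOCK_ADDR_CPT1, 3));
            return 0;
    }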
return -EPIPE;
}
+ vf->cpt_revision = rsp->cpt_revision;
memcpy(hw_caps, rsp->eng_caps,
sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
if (ret)
return -EIO;
- *nb_queues = rsp->cpt;
+ *nb_queues = rsp->cpt + rsp->cpt1;
return 0;
}
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ int blkaddr[OTX2_CPT_MAX_BLKS];
struct rsrc_attach_req *req;
+ int blknum = 0;
+ int i, ret;
+
+ blkaddr[0] = RVU_BLOCK_ADDR_CPT0;
+ blkaddr[1] = RVU_BLOCK_ADDR_CPT1;
/* Ask AF to attach required LFs */
req = otx2_mbox_alloc_msg_attach_resources(mbox);
+ if ((vf->cpt_revision == OTX2_CPT_REVISION_ID_3) &&
+ (vf->otx2_dev.pf_func & 0x1))
+ blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
+
/* 1 LF = 1 queue */
req->cptlfs = nb_queues;
+ req->cpt_blkaddr = blkaddr[blknum];
- if (otx2_mbox_process(mbox) < 0)
+ ret = otx2_mbox_process(mbox);
+ if (ret == -ENOSPC) {
+ if (vf->cpt_revision == OTX2_CPT_REVISION_ID_3) {
+ blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
+ req->cpt_blkaddr = blkaddr[blknum];
+ if (otx2_mbox_process(mbox) < 0)
+ return -EIO;
+ } else {
+ return -EIO;
+ }
+ } else if (ret < 0) {
return -EIO;
+ }
/* Update number of attached queues */
vf->nb_queues = nb_queues;
+ for (i = 0; i < nb_queues; i++)
+ vf->lf_blkaddr[i] = req->cpt_blkaddr;
return 0;
}
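
Pulled out of the mailbox plumbing, the block-selection policy above reduces to the sketch below. This is illustrative only: ``try_attach`` is a hypothetical stand-in for the ``otx2_mbox_process`` round trip, not a driver function.

    #include <errno.h>
    #include <stdint.h>

    #define RVU_BLOCK_ADDR_CPT0    (0xaull)
    #define RVU_BLOCK_ADDR_CPT1    (0xbull)
    #define OTX2_CPT_MAX_BLKS      2
    #define OTX2_CPT_REVISION_ID_3 3

    /* Stub for the AF mailbox round trip; assumed to return -ENOSPC when
     * the chosen block has no free LFs. */
    extern int try_attach(int blkaddr, int nb_queues);

    static int select_and_attach(uint16_t pf_func, uint8_t cpt_revision,
                                 int nb_queues)
    {
            int blkaddr[OTX2_CPT_MAX_BLKS] = {
                    RVU_BLOCK_ADDR_CPT0, RVU_BLOCK_ADDR_CPT1
            };
            int blknum = 0;
            int ret;

            /* On CN98xx (revision 3), odd PF/VF functions start on CPT1,
             * so even and odd VFs land on different crypto blocks. */
            if (cpt_revision == OTX2_CPT_REVISION_ID_3 && (pf_func & 0x1))
                    blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;

            ret = try_attach(blkaddr[blknum], nb_queues);
            if (ret == -ENOSPC && cpt_revision == OTX2_CPT_REVISION_ID_3) {
                    /* Preferred block is full: fall back to the other one. */
                    blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
                    ret = try_attach(blkaddr[blknum], nb_queues);
            }
            return ret < 0 ? -EIO : 0;
    }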
return ret;
for (i = 0; i < vf->nb_queues; i++)
- vf->lf_msixoff[i] = rsp->cptlf_msixoff[i];
+ vf->lf_msixoff[i] = (vf->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
+ rsp->cpt1_lf_msixoff[i] : rsp->cptlf_msixoff[i];
return 0;
}
int
otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
- uint64_t *val)
+ uint8_t blkaddr, uint64_t *val)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
msg->is_write = 0;
msg->reg_offset = reg;
msg->ret_val = val;
+ msg->blkaddr = blkaddr;
ret = otx2_cpt_send_mbox_msg(vf);
if (ret < 0)
int
otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
- uint64_t val)
+ uint8_t blkaddr, uint64_t val)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
msg->is_write = 1;
msg->reg_offset = reg;
msg->val = val;
+ msg->blkaddr = blkaddr;
return otx2_cpt_send_mbox_msg(vf);
}
__rte_internal
int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
- uint64_t *val);
+ uint8_t blkaddr, uint64_t *val);
__rte_internal
int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
- uint64_t val);
+ uint8_t blkaddr, uint64_t val);
int otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev,
struct otx2_cpt_qp *qp, uint16_t port_id);
qp->iq_dma_addr = iova;
qp->id = qp_id;
- qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
+ qp->blkaddr = vf->lf_blkaddr[qp_id];
+ qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);
lmtline = vf->otx2_dev.bar2 +
(RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
struct otx2_cpt_qp {
uint32_t id;
/**< Queue pair id */
+ uint8_t blkaddr;
+ /**< CPT0/1 BLKADDR of LF */
uintptr_t base;
/**< Base address where BAR is mapped */
void *lmtline;
rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- &af_lf_ctl2.u);
+ qp->blkaddr, &af_lf_ctl2.u);
if (ret)
return ret;
af_lf_ctl2.s.sso_pf_func = otx2_sso_pf_func_get();
ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- af_lf_ctl2.u);
+ qp->blkaddr, af_lf_ctl2.u);
if (ret)
return ret;
memset(&qp->ev, 0, sizeof(struct rte_event));
ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- &af_lf_ctl2.u);
+ qp->blkaddr, &af_lf_ctl2.u);
if (ret)
return ret;
af_lf_ctl2.s.sso_pf_func = 0;
ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- af_lf_ctl2.u);
+ qp->blkaddr, af_lf_ctl2.u);
return ret;
}