+/* Size (bytes) of the flow-control region appended to the instruction
+ * queue — presumably one cache line plus padding; confirm against the
+ * CPT hardware spec.
+ */
+#define CPT_IQ_FC_LEN 128
+/* Bytes of group/result metadata per 40-entry chunk — TODO confirm */
+#define CPT_IQ_GRP_LEN 16
+
+/* CPT_LF_Q_SIZE[SIZE_DIV40] is expressed in units of 40 descriptors */
+#define CPT_IQ_NB_DESC_MULTIPLIER 40
+
+/* The effective queue size to software is (CPT_LF_Q_SIZE[SIZE_DIV40] - 1 - 8).
+ *
+ * CPT requires 320 free entries (+8). And 40 entries are required for
+ * allowing CPT to discard packet when the queues are full (+1).
+ */
+#define CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) \
+ (PLT_DIV_CEIL(nb_desc, CPT_IQ_NB_DESC_MULTIPLIER) + 1 + 8)
+
+/* Bytes needed for the group area, proportional to the DIV40 queue size */
+#define CPT_IQ_GRP_SIZE(nb_desc) \
+ (CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_GRP_LEN)
+
+/* Bounds/default for the number of descriptors in a CPT LF queue */
+#define CPT_LF_MAX_NB_DESC 128000
+#define CPT_LF_DEFAULT_NB_DESC 1024
+
+static void
+cpt_lf_misc_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
+{
+	/* Enable all cpt lf error irqs except RQ_DISABLED and CQ_DISABLED */
+	const uint64_t mask = BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) |
+			      BIT_ULL(2) | BIT_ULL(1);
+
+	/* Writing the mask to W1S enables the bits, to W1C disables them */
+	plt_write64(mask, lf->rbase + (enb ? CPT_LF_MISC_INT_ENA_W1S :
+					     CPT_LF_MISC_INT_ENA_W1C));
+}
+
+static void
+cpt_lf_misc_irq(void *param)
+{
+	struct roc_cpt_lf *lf = param;
+	struct dev *dev = lf->dev;
+	uint64_t pending;
+
+	/* Nothing to do when no MISC error bits are latched */
+	pending = plt_read64(lf->rbase + CPT_LF_MISC_INT);
+	if (!pending)
+		return;
+
+	plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", pending, dev->pf, dev->vf);
+
+	/* Dump lf registers */
+	cpt_lf_print(lf);
+
+	/* Clear interrupt */
+	plt_write64(pending, lf->rbase + CPT_LF_MISC_INT);
+}
+
+static int
+cpt_lf_register_misc_irq(struct roc_cpt_lf *lf)
+{
+ struct plt_pci_device *pci_dev = lf->pci_dev;
+ struct plt_intr_handle *handle;
+ int rc, vec;
+
+ handle = &pci_dev->intr_handle;
+
+ vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
+ /* Clear err interrupt */
+ cpt_lf_misc_intr_enb_dis(lf, false);
+ /* Set used interrupt vectors */
+ rc = dev_irq_register(handle, cpt_lf_misc_irq, lf, vec);
+ /* Enable all dev interrupt except for RQ_DISABLED */
+ cpt_lf_misc_intr_enb_dis(lf, true);
+
+ return rc;
+}
+
+static void
+cpt_lf_unregister_misc_irq(struct roc_cpt_lf *lf)
+{
+	struct plt_intr_handle *handle = &lf->pci_dev->intr_handle;
+	int vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
+
+	/* Mask the error interrupts before dropping the handler */
+	cpt_lf_misc_intr_enb_dis(lf, false);
+	dev_irq_unregister(handle, cpt_lf_misc_irq, lf, vec);
+}
+
+static void
+cpt_lf_done_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
+{
+	/* Bit 0 is the DONE interrupt enable; W1S sets it, W1C clears it */
+	uint64_t off = enb ? CPT_LF_DONE_INT_ENA_W1S : CPT_LF_DONE_INT_ENA_W1C;
+
+	plt_write64(0x1, lf->rbase + off);
+}
+
+static void
+cpt_lf_done_irq(void *param)
+{
+	struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
+	uint64_t done_wait;
+	uint64_t nb_done;
+
+	/* Read the number of completed requests */
+	nb_done = plt_read64(lf->rbase + CPT_LF_DONE);
+	if (nb_done == 0)
+		return;
+
+	done_wait = plt_read64(lf->rbase + CPT_LF_DONE_WAIT);
+
+	/* Acknowledge the number of completed requests */
+	plt_write64(nb_done, lf->rbase + CPT_LF_DONE_ACK);
+
+	/* Write back the wait value read above — presumably rearms the
+	 * DONE_WAIT timer/threshold; confirm against hardware spec.
+	 */
+	plt_write64(done_wait, lf->rbase + CPT_LF_DONE_WAIT);
+}
+
+/* Register the DONE (completion) interrupt handler for a CPT LF.
+ *
+ * The interrupt is masked while the handler is installed and enabled
+ * only once registration succeeds, so it can never fire without a
+ * handler attached.
+ *
+ * Returns 0 on success, negative error code from dev_irq_register().
+ */
+static int
+cpt_lf_register_done_irq(struct roc_cpt_lf *lf)
+{
+	struct plt_pci_device *pci_dev = lf->pci_dev;
+	struct plt_intr_handle *handle;
+	int rc, vec;
+
+	handle = &pci_dev->intr_handle;
+
+	vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
+
+	/* Clear done interrupt */
+	cpt_lf_done_intr_enb_dis(lf, false);
+
+	/* Set used interrupt vectors */
+	rc = dev_irq_register(handle, cpt_lf_done_irq, lf, vec);
+	if (rc)
+		return rc;
+
+	/* Enable done interrupt only now that the handler is registered */
+	cpt_lf_done_intr_enb_dis(lf, true);
+
+	return 0;
+}
+
+static void
+cpt_lf_unregister_done_irq(struct roc_cpt_lf *lf)
+{
+	struct plt_intr_handle *handle = &lf->pci_dev->intr_handle;
+	int vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
+
+	/* Mask the done interrupt before dropping the handler */
+	cpt_lf_done_intr_enb_dis(lf, false);
+	dev_irq_unregister(handle, cpt_lf_done_irq, lf, vec);
+}
+
+/* Register the MISC and DONE interrupts of a CPT LF.
+ *
+ * Fails fast: if MISC registration fails its error is returned
+ * immediately instead of being overwritten by the DONE registration
+ * result; if DONE registration fails the already-registered MISC
+ * interrupt is unwound so no partial registration is left behind.
+ *
+ * Returns 0 on success, -EINVAL on an invalid MSIX offset, or the
+ * negative error from the failing registration.
+ */
+static int
+cpt_lf_register_irqs(struct roc_cpt_lf *lf)
+{
+	int rc;
+
+	if (lf->msixoff == MSIX_VECTOR_INVALID) {
+		plt_err("Invalid CPTLF MSIX vector offset vector: 0x%x",
+			lf->msixoff);
+		return -EINVAL;
+	}
+
+	/* Register lf err interrupt */
+	rc = cpt_lf_register_misc_irq(lf);
+	if (rc) {
+		plt_err("Error registering misc IRQs");
+		return rc;
+	}
+
+	rc = cpt_lf_register_done_irq(lf);
+	if (rc) {
+		plt_err("Error registering done IRQs");
+		/* Roll back the misc registration on failure */
+		cpt_lf_unregister_misc_irq(lf);
+	}
+
+	return rc;
+}
+
+/* Unregister both LF interrupts (MISC then DONE); each helper masks its
+ * interrupt before removing the handler.
+ */
+static void
+cpt_lf_unregister_irqs(struct roc_cpt_lf *lf)
+{
+	cpt_lf_unregister_misc_irq(lf);
+	cpt_lf_unregister_done_irq(lf);
+}
+
+/* Debug dump of a CPT LF: software-side state first, then a snapshot of
+ * the key LF CSRs read through rbase. Read-only except for the register
+ * reads themselves; output goes to the plt_cpt_dbg log.
+ */
+static void
+cpt_lf_dump(struct roc_cpt_lf *lf)
+{
+	plt_cpt_dbg("CPT LF");
+	plt_cpt_dbg("RBASE: 0x%016" PRIx64, lf->rbase);
+	plt_cpt_dbg("LMT_BASE: 0x%016" PRIx64, lf->lmt_base);
+	plt_cpt_dbg("MSIXOFF: 0x%x", lf->msixoff);
+	plt_cpt_dbg("LF_ID: 0x%x", lf->lf_id);
+	plt_cpt_dbg("NB DESC: %d", lf->nb_desc);
+	plt_cpt_dbg("FC_ADDR: 0x%016" PRIx64, (uintptr_t)lf->fc_addr);
+	/* NOTE(review): label says CQ.VADDR but the field is iq_vaddr —
+	 * looks like the instruction-queue address; confirm the label.
+	 */
+	plt_cpt_dbg("CQ.VADDR: 0x%016" PRIx64, (uintptr_t)lf->iq_vaddr);
+
+	plt_cpt_dbg("CPT LF REG:");
+	plt_cpt_dbg("LF_CTL[0x%016llx]: 0x%016" PRIx64, CPT_LF_CTL,
+		    plt_read64(lf->rbase + CPT_LF_CTL));
+	plt_cpt_dbg("LF_INPROG[0x%016llx]: 0x%016" PRIx64, CPT_LF_INPROG,
+		    plt_read64(lf->rbase + CPT_LF_INPROG));
+
+	plt_cpt_dbg("Q_BASE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_BASE,
+		    plt_read64(lf->rbase + CPT_LF_Q_BASE));
+	plt_cpt_dbg("Q_SIZE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_SIZE,
+		    plt_read64(lf->rbase + CPT_LF_Q_SIZE));
+	plt_cpt_dbg("Q_INST_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_INST_PTR,
+		    plt_read64(lf->rbase + CPT_LF_Q_INST_PTR));
+	plt_cpt_dbg("Q_GRP_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_GRP_PTR,
+		    plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR));
+}
+
+int
+cpt_lf_outb_cfg(struct dev *dev, uint16_t sso_pf_func, uint16_t nix_pf_func,
+		uint8_t lf_id, bool ena)
+{
+	struct mbox *mbox = dev->mbox;
+	struct cpt_inline_ipsec_cfg_msg *req;
+
+	req = mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
+	if (req == NULL)
+		return -ENOSPC;
+
+	req->dir = CPT_INLINE_OUTBOUND;
+	req->slot = lf_id;
+	req->enable = ena ? 1 : 0;
+	/* PF functions are only meaningful when enabling */
+	if (ena) {
+		req->sso_pf_func = sso_pf_func;
+		req->nix_pf_func = nix_pf_func;
+	}
+
+	return mbox_process(mbox);
+}
+
+int
+roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
+			 struct roc_nix *roc_nix)
+{
+	uint16_t nix_pf_func = 0;
+	uint16_t sso_pf_func = 0;
+
+	/* A NULL roc_nix means "disable"; leave the pf_funcs zeroed */
+	if (roc_nix != NULL) {
+		nix_pf_func = roc_nix_get_pf_func(roc_nix);
+		sso_pf_func = idev_sso_pffunc_get();
+	}
+
+	return cpt_lf_outb_cfg(cpt_dev, sso_pf_func, nix_pf_func, lf_id,
+			       roc_nix != NULL);
+}
+
+int
+roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
+			     uint16_t param2)
+{
+	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
+	struct mbox *mbox = cpt->dev.mbox;
+	struct cpt_rx_inline_lf_cfg_msg *req;
+
+	req = mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
+	if (req == NULL)
+		return -ENOSPC;
+
+	req->sso_pf_func = idev_sso_pffunc_get();
+	req->param1 = param1;
+	req->param2 = param2;
+
+	return mbox_process(mbox);
+}
+